// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test host codegen.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK2
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK3
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK4

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"

// Test target codegen - host bc file has to be created first.
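// For reference, the host-then-device two-step used by the RUN lines below
// looks roughly like this (an illustrative sketch, not an additional RUN
// line; the file names are hypothetical):
//
//   clang -cc1 -fopenmp -fopenmp-targets=<device-triple> -emit-llvm-bc foo.cpp -o host.bc
//   clang -cc1 -fopenmp -fopenmp-is-device -fopenmp-host-ir-file-path host.bc foo.cpp ...
//
// The device-side compile reads the host IR back so the offload code it
// emits stays consistent with what the host side expects.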
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK9
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK10
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK11
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK12

// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=45 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"

// Test host codegen.
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK17
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK18
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK19
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK20

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"

// Test target codegen - host bc file has to be created first.
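// Note on the -fopenmp-simd RUN lines throughout this file: they reuse the
// bare CHECK prefix, and it is --implicit-check-not="{{__kmpc|__tgt}}" that
// actually verifies that simd-only lowering never emits calls into the
// OpenMP (__kmpc_*) or offloading (__tgt_*) runtimes.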
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK25
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK26
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK27
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck -allow-deprecated-dag-overlap %s --check-prefix=CHECK28

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

// We have 8 target regions, but only 7 of them will actually generate
// offloading code; only 6 will have mapped arguments, and only 4 have
// all-constant map sizes.

// Check that target registration is emitted as a Ctor.

template<typename tx, typename ty>
struct TT{
  tx X;
  ty Y;
};

long long get_val() { return 0; }

int foo(int n) {
  int a = 0;
  short aa = 0;
  float b[10];
  float bn[n];
  double c[5][10];
  double cn[5][n];
  TT<long long, char> d;

#pragma omp target parallel for
  for (int i = 3; i < 32; i += 5) {
#pragma omp cancel for
#pragma omp cancellation point for
  }

  long long k = get_val();
#pragma omp target parallel for if(target: 0) linear(k : 3) schedule(dynamic)
  for (int i = 10; i > 1; i--) {
    a += 1;
  }

// CHECK-32: [[FPSIZEGEP]] = getelementptr inbounds [[KMP_PRIVATES_T]], [[KMP_PRIVATES_T]]* [[KMP_PRIVATES]], i32 0, i32 0
// CHECK-32: [[FPSIZEADDR:%.+]] = bitcast [3 x i64]* [[FPSIZEGEP]] to i8*
// CHECK-32: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[FPSIZEADDR]], i8* align 8 bitcast ([3 x i64]* [[SIZET2]] to i8*), i64 24, i1 false)
// CHECK-32: [[FPBPGEP:%.+]] = getelementptr inbounds [[KMP_PRIVATES_T]], [[KMP_PRIVATES_T]]* [[KMP_PRIVATES]], i32 0, i32 1
// CHECK-32: [[FPBPADDR:%.+]] = bitcast [3 x i8*]* [[FPBPGEP]] to i8*
// CHECK-32: [[BPCAST:%.+]] = bitcast i8** [[BPGEP]] to i8*
// CHECK-32: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[FPBPADDR]], i8* align 8 [[BPCAST]], i64 24, i1 false)
// CHECK-32: [[FPPGEP:%.+]] = getelementptr inbounds [[KMP_PRIVATES_T]], [[KMP_PRIVATES_T]]* [[KMP_PRIVATES]], i32 0, i32 2
// CHECK-32: [[FPPADDR:%.+]] = bitcast [3 x i8*]* [[FPPGEP]] to i8*
// CHECK-32: [[PCAST:%.+]] = bitcast i8** [[PGEP]] to i8*
// CHECK-32: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[FPPADDR]], i8* align 8 [[PCAST]], i64 24, i1 false)
// CHECK-64: [[FPBPGEP:%.+]] = getelementptr inbounds [[KMP_PRIVATES_T]], [[KMP_PRIVATES_T]]* [[KMP_PRIVATES]], i32 0, i32 0
// CHECK-64: [[FPBPADDR:%.+]] = bitcast [3 x i8*]* [[FPBPGEP]] to i8*
// CHECK-64: [[BPCAST:%.+]] = bitcast i8** [[BPGEP]] to i8*
// CHECK-64: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[FPBPADDR]], i8* align 8 [[BPCAST]], i64 24, i1 false)
// CHECK-64: [[FPPGEP:%.+]] = getelementptr inbounds [[KMP_PRIVATES_T]], [[KMP_PRIVATES_T]]* [[KMP_PRIVATES]], i32 0, i32 1
// CHECK-64: [[FPPADDR:%.+]] = bitcast [3 x i8*]* [[FPPGEP]] to i8*
// CHECK-64: [[PCAST:%.+]] = bitcast i8** [[PGEP]] to i8*
// CHECK-64: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[FPPADDR]], i8* align 8 [[PCAST]], i64 24, i1 false)
// CHECK-64: [[FPSIZEGEP]] = getelementptr inbounds [[KMP_PRIVATES_T]], [[KMP_PRIVATES_T]]* [[KMP_PRIVATES]], i32 0, i32 2
// CHECK-64: [[FPSIZEADDR:%.+]] = bitcast [3 x i64]* [[FPSIZEGEP]] to i8*
// CHECK-64: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[FPSIZEADDR]], i8* align 8 bitcast ([3 x i64]* [[SIZET2]] to i8*), i64 24, i1 false)
  int lin = 12;
#pragma omp target parallel for if(target: 1) linear(lin, a : get_val()) nowait
  for (unsigned long long it = 2000; it >= 600; it-=400) {
    aa += 1;
  }

#pragma omp target parallel for if(target: n>10)
  for (short it = 6; it <= 20; it-=-4) {
    a += 1;
    aa += 1;
  }

  // We capture 3 VLA sizes in this target region

  // The names below are not necessarily consistent with the names used for
  // the addresses above, as some are repeated.

#pragma omp target parallel for if(target: n>20) schedule(static, a)
  for (unsigned char it = 'z'; it >= 'a'; it+=-1) {
    a += 1;
    b[2] += 1.0;
    bn[3] += 1.0;
    c[1][2] += 1.0;
    cn[1][3] += 1.0;
    d.X += 1;
    d.Y += 1;
  }

  return a;
}

// Check that the offloading functions are emitted and that the arguments are
// correct and loaded correctly for the target regions in foo().

// Create stack storage and store argument in there.

// Create stack storage and store argument in there.

// Create stack storage and store argument in there.

// Create local storage for each capture.

// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.

template<typename tx>
tx ftemplate(int n) {
  tx a = 0;
  short aa = 0;
  tx b[10];

#pragma omp target parallel for if(target: n>40)
  for (long long i = -10; i < 10; i += 3) {
    a += 1;
    aa += 1;
    b[2] += 1;
  }

  return a;
}

static
int fstatic(int n) {
  int a = 0;
  short aa = 0;
  char aaa = 0;
  int b[10];

#pragma omp target parallel for if(target: n>50)
  for (unsigned i=100; i<10; i+=10) {
    a += 1;
    aa += 1;
    aaa += 1;
    b[2] += 1;
  }

  return a;
}

struct S1 {
  double a;

  int r1(int n){
    int b = n+1;
    short int c[2][n];

#pragma omp target parallel for if(target: n>60)
    for (unsigned long long it = 2000; it >= 600; it -= 400) {
      this->a = (double)b + 1.5;
      c[1][1] = ++a;
    }

    return c[1][1] + (int)b;
  }
};

int bar(int n){
  int a = 0;

  a += foo(n);

  S1 S;
  a += S.r1(n);

  a += fstatic(n);

  a += ftemplate<int>(n);

  return a;
}

// We capture 2 VLA sizes in this target region

// The names below are not necessarily consistent with the names used for
// the addresses above, as some are repeated.

// Check that the offloading functions are emitted and that the arguments are
// correct and loaded correctly for the target regions of the callees of bar().

// Create local storage for each capture.
// Store captures in the context.

// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.

// Create local storage for each capture.
// Store captures in the context.

// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.

// Create local storage for each capture.
// Store captures in the context.

// To reduce complexity, we're only going as far as validating the signature of the outlined parallel function.
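// A minimal illustration of what "capturing a VLA size" means for a target
// region (a hypothetical sketch, kept in a comment so it is not part of the
// checked code above): every variably modified type referenced inside the
// region makes the compiler pass the saved bound alongside the data pointer,
// which is why the maps for foo() and S1::r1() above carry extra i64 size
// arguments.
//
//   void vla_capture(int n) {
//     double v[n];               // bound 'n' is stashed in a __vla_expr slot
//   #pragma omp target parallel for
//     for (int i = 0; i < n; ++i)
//       v[i] = i;                // 'v' and its captured size are both mapped
//   }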

#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: ret i64 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK1-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK1-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED4:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK1-NEXT: [[A_CASTED6:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED8:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A_CASTED15:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK1-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103() #[[ATTR4:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK1-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP9]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[K]], align 8
// CHECK1-NEXT: store i64 [[TMP11]], i64* [[K_CASTED]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i64 [[TMP10]], i64 [[TMP12]]) #[[ATTR4]]
// CHECK1-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP13]], i16* [[CONV2]], align 2
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP15]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32*
// CHECK1-NEXT: store i32 [[TMP17]], i32* [[CONV5]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, i64* [[A_CASTED4]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK1-NEXT: store i64 [[TMP14]], i64* [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK1-NEXT: store i64 [[TMP14]], i64* [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
// CHECK1-NEXT: store i64 [[TMP16]], i64* [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK1-NEXT: store i64 [[TMP16]], i64* [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
// CHECK1-NEXT: store i64 [[TMP18]], i64* [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK1-NEXT: store i64 [[TMP18]], i64* [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK1-NEXT: [[TMP37:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: store i16 [[TMP37]], i16* [[TMP36]], align 4
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK1-NEXT: store i32 [[TMP39]], i32* [[TMP38]], align 4
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK1-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: store i32 [[TMP41]], i32* [[TMP40]], align 4
// CHECK1-NEXT: [[TMP42:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK1-NEXT: [[TMP43:%.*]] = bitcast i8* [[TMP42]] to %struct.kmp_task_t_with_privates*
// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP43]], i32 0, i32 0
// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP44]], i32 0, i32 0
// CHECK1-NEXT: [[TMP46:%.*]] = load i8*, i8** [[TMP45]], align 8
// CHECK1-NEXT: [[TMP47:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP46]], i8* align 4 [[TMP47]], i64 12, i1 false)
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP43]], i32 0, i32 1
// CHECK1-NEXT: [[TMP49:%.*]] = bitcast i8* [[TMP46]] to %struct.anon*
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 0
// CHECK1-NEXT: [[TMP51:%.*]] = bitcast [3 x i8*]* [[TMP50]] to i8*
// CHECK1-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP34]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP51]], i8* align 8 [[TMP52]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 1
// CHECK1-NEXT: [[TMP54:%.*]] = bitcast [3 x i8*]* [[TMP53]] to i8*
// CHECK1-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP35]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP54]], i8* align 8 [[TMP55]], i64 24, i1 false)
// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 2
// CHECK1-NEXT: [[TMP57:%.*]] = bitcast [3 x i64]* [[TMP56]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP57]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes to i8*), i64 24, i1 false)
// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 3
// CHECK1-NEXT: [[TMP59:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: store i16 [[TMP59]], i16* [[TMP58]], align 8
// CHECK1-NEXT: [[TMP60:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP42]])
// CHECK1-NEXT: [[TMP61:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32*
// CHECK1-NEXT: store i32 [[TMP61]], i32* [[CONV7]], align 4
// CHECK1-NEXT: [[TMP62:%.*]] = load i64, i64* [[A_CASTED6]], align 8
// CHECK1-NEXT: [[TMP63:%.*]] = load i16, i16* [[AA]], align 2
// CHECK1-NEXT: [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16*
// CHECK1-NEXT: store i16 [[TMP63]], i16* [[CONV9]], align 2
// CHECK1-NEXT: [[TMP64:%.*]] = load i64, i64* [[AA_CASTED8]], align 8
// CHECK1-NEXT: [[TMP65:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP65]], 10
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64*
// CHECK1-NEXT: store i64 [[TMP62]], i64* [[TMP67]], align 8
// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64*
// CHECK1-NEXT: store i64 [[TMP62]], i64* [[TMP69]], align 8
// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP70]], align 8
// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1
// CHECK1-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
// CHECK1-NEXT: store i64 [[TMP64]], i64* [[TMP72]], align 8
// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1
// CHECK1-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
// CHECK1-NEXT: store i64 [[TMP64]], i64* [[TMP74]], align 8
// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP75]], align 8
// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK1-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146.region_id, i32 2, i8** [[TMP76]], i8** [[TMP77]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
// CHECK1-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
// CHECK1: omp_offload.failed13:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i64 [[TMP62]], i64 [[TMP64]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT14]]
// CHECK1: omp_offload.cont14:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i64 [[TMP62]], i64 [[TMP64]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP80:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: store i32 [[TMP80]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[TMP81:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32*
// CHECK1-NEXT: store i32 [[TMP81]], i32* [[CONV16]], align 4
// CHECK1-NEXT: [[TMP82:%.*]] = load i64, i64* [[A_CASTED15]], align 8
// CHECK1-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK1-NEXT: [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP83]], i32* [[CONV17]], align 4
// CHECK1-NEXT: [[TMP84:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK1-NEXT: [[TMP85:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP85]], 20
// CHECK1-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]]
// CHECK1: omp_if.then19:
// CHECK1-NEXT: [[TMP86:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK1-NEXT: [[TMP87:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK1-NEXT: [[TMP88:%.*]] = mul nuw i64 [[TMP87]], 8
// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK1-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
// CHECK1-NEXT: store i64 [[TMP82]], i64* [[TMP90]], align 8
// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK1-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64*
// CHECK1-NEXT: store i64 [[TMP82]], i64* [[TMP92]], align 8
// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK1-NEXT: store i64 4, i64* [[TMP93]], align 8
// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP94]], align 8
// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
// CHECK1-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]**
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 8
// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
// CHECK1-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to [10 x float]**
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP98]], align 8
// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK1-NEXT: store i64 40, i64* [[TMP99]], align 8
// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP100]], align 8
// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
// CHECK1-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP102]], align 8
// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
// CHECK1-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP104]], align 8
// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK1-NEXT: store i64 8, i64* [[TMP105]], align 8
// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP106]], align 8
// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
// CHECK1-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float**
// CHECK1-NEXT: store float* [[VLA]], float** [[TMP108]], align 8
// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
// CHECK1-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to float**
// CHECK1-NEXT: store float* [[VLA]], float** [[TMP110]], align 8
// CHECK1-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK1-NEXT: store i64 [[TMP86]], i64* [[TMP111]], align 8
// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
// CHECK1-NEXT: store i8* null, i8** [[TMP112]], align 8 563 // CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 564 // CHECK1-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]** 565 // CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 8 566 // CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 567 // CHECK1-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** 568 // CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 569 // CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 570 // CHECK1-NEXT: store i64 400, i64* [[TMP117]], align 8 571 // CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 572 // CHECK1-NEXT: store i8* null, i8** [[TMP118]], align 8 573 // CHECK1-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 574 // CHECK1-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* 575 // CHECK1-NEXT: store i64 5, i64* [[TMP120]], align 8 576 // CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 577 // CHECK1-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* 578 // CHECK1-NEXT: store i64 5, i64* [[TMP122]], align 8 579 // CHECK1-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 580 // CHECK1-NEXT: store i64 8, i64* [[TMP123]], align 8 581 // CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 582 // CHECK1-NEXT: store i8* null, i8** [[TMP124]], align 8 583 // CHECK1-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 584 // CHECK1-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* 585 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 586 // CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 587 // CHECK1-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* 588 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 589 // CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 590 // CHECK1-NEXT: store i64 8, i64* [[TMP129]], align 8 591 // CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 592 // CHECK1-NEXT: store i8* null, i8** [[TMP130]], align 8 593 // CHECK1-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 594 // CHECK1-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double** 595 // CHECK1-NEXT: store double* [[VLA1]], double** [[TMP132]], align 8 596 // CHECK1-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 597 // CHECK1-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** 598 // CHECK1-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8 599 // CHECK1-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 600 // CHECK1-NEXT: store i64 [[TMP88]], i64* [[TMP135]], align 8 601 // CHECK1-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], 
[10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 602 // CHECK1-NEXT: store i8* null, i8** [[TMP136]], align 8 603 // CHECK1-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 604 // CHECK1-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** 605 // CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 606 // CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 607 // CHECK1-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to %struct.TT** 608 // CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP140]], align 8 609 // CHECK1-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 610 // CHECK1-NEXT: store i64 16, i64* [[TMP141]], align 8 611 // CHECK1-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 612 // CHECK1-NEXT: store i8* null, i8** [[TMP142]], align 8 613 // CHECK1-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 614 // CHECK1-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64* 615 // CHECK1-NEXT: store i64 [[TMP84]], i64* [[TMP144]], align 8 616 // CHECK1-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 617 // CHECK1-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64* 618 // CHECK1-NEXT: store i64 [[TMP84]], i64* [[TMP146]], align 8 619 // CHECK1-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 620 // CHECK1-NEXT: store i64 4, i64* [[TMP147]], align 8 621 // CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 622 // CHECK1-NEXT: store i8* null, i8** [[TMP148]], align 8 623 // CHECK1-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 624 // CHECK1-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 625 // CHECK1-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 626 // CHECK1-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 627 // CHECK1-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 628 // CHECK1-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] 629 // CHECK1: omp_offload.failed23: 630 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]] 631 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT24]] 632 // CHECK1: omp_offload.cont24: 633 // CHECK1-NEXT: br label [[OMP_IF_END26:%.*]] 634 // CHECK1: omp_if.else25: 635 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]] 636 // CHECK1-NEXT: br 
label [[OMP_IF_END26]] 637 // CHECK1: omp_if.end26: 638 // CHECK1-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 639 // CHECK1-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 640 // CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) 641 // CHECK1-NEXT: ret i32 [[TMP154]] 642 // 643 // 644 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 645 // CHECK1-SAME: () #[[ATTR2:[0-9]+]] { 646 // CHECK1-NEXT: entry: 647 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 648 // CHECK1-NEXT: ret void 649 // 650 // 651 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 652 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] { 653 // CHECK1-NEXT: entry: 654 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 655 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 656 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 657 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 658 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 659 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 660 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 661 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 662 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 663 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 664 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 665 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 666 // CHECK1-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 667 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 668 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 669 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 670 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 671 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 672 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 673 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 674 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 675 // CHECK1: cond.true: 676 // CHECK1-NEXT: br label [[COND_END:%.*]] 677 // CHECK1: cond.false: 678 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 679 // CHECK1-NEXT: br label [[COND_END]] 680 // CHECK1: cond.end: 681 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 682 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 683 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 684 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 685 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 686 // CHECK1: omp.inner.for.cond: 687 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 688 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 689 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 690 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 691 // CHECK1: omp.inner.for.body: 692 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* 
[[DOTOMP_IV]], align 4 693 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 694 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 695 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4 696 // CHECK1-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2) 697 // CHECK1-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 698 // CHECK1-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] 699 // CHECK1: .cancel.exit: 700 // CHECK1-NEXT: br label [[CANCEL_EXIT:%.*]] 701 // CHECK1: .cancel.continue: 702 // CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2) 703 // CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 704 // CHECK1-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]] 705 // CHECK1: .cancel.exit2: 706 // CHECK1-NEXT: br label [[CANCEL_EXIT]] 707 // CHECK1: .cancel.continue3: 708 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 709 // CHECK1: omp.body.continue: 710 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 711 // CHECK1: omp.inner.for.inc: 712 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 713 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 714 // CHECK1-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4 715 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 716 // CHECK1: omp.inner.for.end: 717 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 718 // CHECK1: omp.loop.exit: 719 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 720 // CHECK1-NEXT: br label [[CANCEL_CONT:%.*]] 721 // CHECK1: cancel.cont: 722 // CHECK1-NEXT: ret void 723 // CHECK1: cancel.exit: 724 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 725 // CHECK1-NEXT: br label [[CANCEL_CONT]] 726 // 727 // 728 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110 729 // CHECK1-SAME: (i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] { 730 // CHECK1-NEXT: entry: 731 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 732 // CHECK1-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8 733 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 734 // CHECK1-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8 735 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 736 // CHECK1-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8 737 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 738 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8 739 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32* 740 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 741 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 742 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8 743 // CHECK1-NEXT: store i64 [[TMP2]], i64* [[K_CASTED]], align 8 744 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8 745 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) 746 // CHECK1-NEXT: ret void 747 // 748 // 749 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1 750 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] { 751 // CHECK1-NEXT: entry: 752 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 753 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 754 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 755 // CHECK1-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8 756 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 757 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 758 // CHECK1-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 759 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 760 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 761 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 762 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 763 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 764 // CHECK1-NEXT: [[K1:%.*]] = alloca i64, align 8 765 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 766 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 767 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 768 // CHECK1-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8 769 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 770 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8 771 // CHECK1-NEXT: store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8 772 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 773 // CHECK1-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4 774 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 775 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 776 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 777 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 778 // CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]]) 779 // CHECK1-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 35, i32 0, i32 8, i32 1, i32 1) 780 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 781 // CHECK1: omp.dispatch.cond: 782 // CHECK1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]]) 783 // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0 784 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 785 // CHECK1: omp.dispatch.body: 786 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 787 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 788 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 789 // CHECK1: omp.inner.for.cond: 790 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 791 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12 792 // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 793 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 794 // CHECK1: omp.inner.for.body: 795 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* 
[[DOTOMP_IV]], align 4, !llvm.access.group !12 796 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 797 // CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] 798 // CHECK1-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !12 799 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !12 800 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 801 // CHECK1-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3 802 // CHECK1-NEXT: [[CONV3:%.*]] = sext i32 [[MUL2]] to i64 803 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]] 804 // CHECK1-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !12 805 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !12 806 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1 807 // CHECK1-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !12 808 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 809 // CHECK1: omp.body.continue: 810 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 811 // CHECK1: omp.inner.for.inc: 812 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 813 // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 814 // CHECK1-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 815 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] 816 // CHECK1: omp.inner.for.end: 817 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 818 // CHECK1: omp.dispatch.inc: 819 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] 820 // CHECK1: omp.dispatch.end: 821 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 822 // CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 823 // CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 824 // CHECK1: .omp.linear.pu: 825 // CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8 826 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP14]], 27 827 // CHECK1-NEXT: store i64 [[ADD6]], i64* [[K_ADDR]], align 8 828 // CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 829 // CHECK1: .omp.linear.pu.done: 830 // CHECK1-NEXT: ret void 831 // 832 // 833 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138 834 // CHECK1-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR2]] { 835 // CHECK1-NEXT: entry: 836 // CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 837 // CHECK1-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 838 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 839 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 840 // CHECK1-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 841 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 842 // CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 843 // CHECK1-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 844 // CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 845 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 846 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 847 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 848 // CHECK1-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8 849 // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 850 // CHECK1-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 851 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 852 // CHECK1-NEXT: [[TMP2:%.*]] = 
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN4:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A5:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK1-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK1-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK1-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK1-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK1-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK1-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK1-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
// CHECK1-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
// CHECK1-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4
// CHECK1-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK1-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
// CHECK1-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
// CHECK1-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK1-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
// CHECK1-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
// CHECK1-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1
// CHECK1-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK1-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK1: .omp.linear.pu:
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK1-NEXT: [[CONV18:%.*]] = sext i32 [[TMP20]] to i64
// CHECK1-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK1-NEXT: [[MUL19:%.*]] = mul i64 4, [[TMP21]]
// CHECK1-NEXT: [[ADD20:%.*]] = add i64 [[CONV18]], [[MUL19]]
// CHECK1-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD20]] to i32
// CHECK1-NEXT: store i32 [[CONV21]], i32* [[CONV1]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4
// CHECK1-NEXT: [[CONV22:%.*]] = sext i32 [[TMP22]] to i64
// CHECK1-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK1-NEXT: [[MUL23:%.*]] = mul i64 4, [[TMP23]]
// CHECK1-NEXT: [[ADD24:%.*]] = add i64 [[CONV22]], [[MUL23]]
// CHECK1-NEXT: [[CONV25:%.*]] = trunc i64 [[ADD24]] to i32
// CHECK1-NEXT: store i32 [[CONV25]], i32* [[CONV2]], align 8
// CHECK1-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK1: .omp.linear.pu.done:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_privates_map.
// CHECK1-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i16** noalias [[TMP1:%.*]], [3 x i8*]** noalias [[TMP2:%.*]], [3 x i8*]** noalias [[TMP3:%.*]], [3 x i64]** noalias [[TMP4:%.*]]) #[[ATTR6:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i16**, align 8
// CHECK1-NEXT: [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 8
// CHECK1-NEXT: [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 8
// CHECK1-NEXT: [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 8
// CHECK1-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
// CHECK1-NEXT: store i16** [[TMP1]], i16*** [[DOTADDR1]], align 8
// CHECK1-NEXT: store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 8
// CHECK1-NEXT: store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 8
// CHECK1-NEXT: store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 8
// CHECK1-NEXT: store [3 x i8*]* [[TMP6]], [3 x i8*]** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
// CHECK1-NEXT: [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 8
// CHECK1-NEXT: store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
// CHECK1-NEXT: [[TMP11:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 8
// CHECK1-NEXT: store [3 x i64]* [[TMP10]], [3 x i64]** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
// CHECK1-NEXT: [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 8
// CHECK1-NEXT: store i16* [[TMP12]], i16** [[TMP13]], align 8
// CHECK1-NEXT: ret void
//
//
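// NOTE (not a FileCheck directive): @.omp_task_privates_map. above hands back
// pointers into the task's private block (the firstprivate i16 plus the three
// offload arrays). The @.omp_task_entry. checks that follow assert the deferred
// target path: the task body re-issues the region through
// @__tgt_target_teams_nowait_mapper and keys the host fallback on its result.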
// CHECK1-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK1-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK1-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 8
// CHECK1-NEXT: [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 8
// CHECK1-NEXT: [[AA_CASTED_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[LIN_CASTED_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED_I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK1-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
// CHECK1-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24
// CHECK1-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
// CHECK1-NEXT: call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
// CHECK1-NEXT: [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i64 0, i64 0
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i64 0, i64 0
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i64 0, i64 0
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK1-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK1-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]]
// CHECK1: omp_offload.failed.i:
// CHECK1-NEXT: [[TMP27:%.*]] = load i16, i16* [[TMP16]], align 2
// CHECK1-NEXT: [[CONV_I:%.*]] = bitcast i64* [[AA_CASTED_I]] to i16*
// CHECK1-NEXT: store i16 [[TMP27]], i16* [[CONV_I]], align 2, !noalias !24
// CHECK1-NEXT: [[TMP28:%.*]] = load i64, i64* [[AA_CASTED_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK1-NEXT: [[CONV4_I:%.*]] = bitcast i64* [[LIN_CASTED_I]] to i32*
// CHECK1-NEXT: store i32 [[TMP29]], i32* [[CONV4_I]], align 4, !noalias !24
// CHECK1-NEXT: [[TMP30:%.*]] = load i64, i64* [[LIN_CASTED_I]], align 8, !noalias !24
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP24]], align 4
// CHECK1-NEXT: [[CONV5_I:%.*]] = bitcast i64* [[A_CASTED_I]] to i32*
// CHECK1-NEXT: store i32 [[TMP31]], i32* [[CONV5_I]], align 4, !noalias !24
// CHECK1-NEXT: [[TMP32:%.*]] = load i64, i64* [[A_CASTED_I]], align 8, !noalias !24
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138(i64 [[TMP28]], i64 [[TMP30]], i64 [[TMP32]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]]
// CHECK1: .omp_outlined..3.exit:
// CHECK1-NEXT: ret i32 0
//
//
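// NOTE (not a FileCheck directive): the failed/exit blocks above encode the usual
// offload-failure pattern. A minimal sketch of the control flow being asserted
// (names taken from the checks; otherwise hypothetical):
//   int rc = __tgt_target_teams_nowait_mapper(/* loc, device, region_id, args */);
//   if (rc != 0)
//     __omp_offloading_..._Z3fooi_l138(aa, lin, a); // host fallback of the kernel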
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146
// CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK1-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK1-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK1-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK1-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK1-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK1-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK1-NEXT: ret void
//
//
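// NOTE (not a FileCheck directive): @.omp_outlined..4 above is initialized with
// @__kmpc_for_static_init_4 and schedule kind 34 (static, unchunked, if I read
// libomp's sched_type constants right); the upper bound is clamped to 3, i.e. a
// four-iteration loop over the i16 induction variable 'it'.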
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170
// CHECK1-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK1-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK1-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK1-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK1-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK1: omp.dispatch.cond:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK1: omp.dispatch.body:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK1-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK1-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK1-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK1-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK1-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK1-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK1-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK1-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK1-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK1-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK1-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK1-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK1-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK1-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK1-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK1-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK1-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK1-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK1-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK1-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK1-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK1-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK1-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK1-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8
// CHECK1-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK1-NEXT: store i64 [[ADD22]], i64* [[X]], align 8
// CHECK1-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK1-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8
// CHECK1-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK1-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK1-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK1-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK1-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK1: omp.dispatch.inc:
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK1-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK1-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK1: omp.dispatch.end:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK1-NEXT: ret void
//
//
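// NOTE (not a FileCheck directive): @.omp_outlined..7 above passes schedule kind
// 33 (static chunked, with the chunk size taken from the captured expression
// [[DOTCAPTURE_EXPR_]]), which is presumably why codegen wraps the body in an
// omp.dispatch.cond loop that bumps [[DOTOMP_LB]]/[[DOTOMP_UB]] by the stride
// until all chunks are consumed.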
// CHECK1-LABEL: define {{[^@]+}}@_Z3bari
// CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]])
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]])
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]])
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK1-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]])
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK1-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: ret i32 [[TMP8]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK1-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8
// CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK1-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK1-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK1: omp_if.then:
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]]
// CHECK1-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK1-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK1-NEXT: store double* [[A]], double** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK1-NEXT: store i64 8, i64* [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64*
// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64*
// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK1-NEXT: store i64 4, i64* [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64*
// CHECK1-NEXT: store i64 2, i64* [[TMP23]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
// CHECK1-NEXT: store i64 2, i64* [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK1-NEXT: store i64 8, i64* [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK1-NEXT: store i64 8, i64* [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK1-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK1-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4
// CHECK1-NEXT: store i8* null, i8** [[TMP39]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK1-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK1-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]]
// CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK1-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32
// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]]
// CHECK1-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK1-NEXT: ret i32 [[ADD4]]
//
//
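// NOTE (not a FileCheck directive): @_ZN2S12r1Ei above fills 5-entry offload
// arrays (this with member 'a' as the mapped pointee, 'b' passed by value, the
// two VLA extents, and the VLA storage itself, whose byte size [[TMP9]] is
// computed at run time) and picks between @__tgt_target_teams_mapper and the
// direct host call based on the 'n > 60' if-clause test.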
[[TMP2:%.*]] = load i16, i16* [[AA]], align 2 1550 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 1551 // CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 1552 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 1553 // CHECK1-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1 1554 // CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* 1555 // CHECK1-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1 1556 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 1557 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 1558 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 1559 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 1560 // CHECK1: omp_if.then: 1561 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1562 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64* 1563 // CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8 1564 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1565 // CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64* 1566 // CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8 1567 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 1568 // CHECK1-NEXT: store i8* null, i8** [[TMP11]], align 8 1569 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1570 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* 1571 // CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8 1572 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1573 // CHECK1-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* 1574 // CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8 1575 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 1576 // CHECK1-NEXT: store i8* null, i8** [[TMP16]], align 8 1577 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 1578 // CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* 1579 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8 1580 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 1581 // CHECK1-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* 1582 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8 1583 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 1584 // CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8 1585 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 1586 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]** 1587 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8 1588 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 1589 // CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]** 1590 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8 1591 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 1592 // CHECK1-NEXT: store i8* null, i8** [[TMP26]], align 8 1593 // 
CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1594 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1595 // CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 1596 // CHECK1-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 1597 // CHECK1-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1598 // CHECK1: omp_offload.failed: 1599 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 1600 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 1601 // CHECK1: omp_offload.cont: 1602 // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] 1603 // CHECK1: omp_if.else: 1604 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 1605 // CHECK1-NEXT: br label [[OMP_IF_END]] 1606 // CHECK1: omp_if.end: 1607 // CHECK1-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4 1608 // CHECK1-NEXT: ret i32 [[TMP31]] 1609 // 1610 // 1611 // CHECK1-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 1612 // CHECK1-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 1613 // CHECK1-NEXT: entry: 1614 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 1615 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 1616 // CHECK1-NEXT: [[AA:%.*]] = alloca i16, align 2 1617 // CHECK1-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 1618 // CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 1619 // CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 1620 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 1621 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 1622 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 1623 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 1624 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 1625 // CHECK1-NEXT: store i16 0, i16* [[AA]], align 2 1626 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 1627 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 1628 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4 1629 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 1630 // CHECK1-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 1631 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 1632 // CHECK1-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 1633 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 1634 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 1635 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 1636 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 1637 // CHECK1: omp_if.then: 1638 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1639 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64* 1640 // CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8 1641 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds 
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK1-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK1: omp_if.else:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK1-NEXT: br label [[OMP_IF_END]]
// CHECK1: omp_if.end:
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK1-NEXT: ret i32 [[TMP24]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242
// CHECK1-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
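// Target-region entry for S1::r1: the checks below verify that each argument is spilled to an alloca, b is re-packed through B_CASTED, and everything is forwarded to .omp_outlined..9 via __kmpc_fork_call.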
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
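// .omp_outlined..9 reloads its captures below and runs a statically scheduled loop set up by __kmpc_for_static_init_8u, starting with the usual upper-bound clamp.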
// CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK1-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK1-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK1-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK1-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8
// CHECK1-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK1-NEXT: store double [[INC]], double* [[A5]], align 8
// CHECK1-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16
// CHECK1-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK1-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2
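// Standard loop bookkeeping follows: the body-continue and increment blocks, then __kmpc_for_static_fini on exit.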
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1
// CHECK1-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224
// CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK1-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
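// The fstatic target entry above hands its re-packed scalars to .omp_outlined..11; its return and the outlined body are checked next.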
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207
// CHECK1-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
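// The ftemplate entry likewise forwards to .omp_outlined..14, which contains the actual loop; its checks follow.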
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK1-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
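// The inner-loop checks below verify the induction mapping i = -10 + 3 * IV and the unit increments of a, aa, and b[2].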
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK1-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK1-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK1-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK1-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK1-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK1-SAME: () #[[ATTR6]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: ret i64 0
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK2-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK2-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_CASTED4:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
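// The CHECK2 prefix re-runs the host checks for the PCH build of the same source; the allocas below stage the captures and offload arrays for the target regions in foo.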
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK2-NEXT: [[A_CASTED6:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED8:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_CASTED15:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[A]], align 4
// CHECK2-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK2-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK2-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK2-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK2-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK2-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK2-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103() #[[ATTR4:[0-9]+]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK2-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP9]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP11:%.*]] = load i64, i64* [[K]], align 8
// CHECK2-NEXT: store i64 [[TMP11]], i64* [[K_CASTED]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i64 [[TMP10]], i64 [[TMP12]]) #[[ATTR4]]
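// The l110 region above is called directly on the host; next, aa, lin, and a are staged into both the offload arrays and a captured struct so the l138 region can be launched as a task via __kmpc_omp_target_task_alloc.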
// CHECK2-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i16, i16* [[AA]], align 2
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP13]], i16* [[CONV2]], align 2
// CHECK2-NEXT: [[TMP14:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP15]], i32* [[CONV3]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32*
// CHECK2-NEXT: store i32 [[TMP17]], i32* [[CONV5]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load i64, i64* [[A_CASTED4]], align 8
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK2-NEXT: store i64 [[TMP14]], i64* [[TMP20]], align 8
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK2-NEXT: store i64 [[TMP14]], i64* [[TMP22]], align 8
// CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP23]], align 8
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
// CHECK2-NEXT: store i64 [[TMP16]], i64* [[TMP25]], align 8
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK2-NEXT: store i64 [[TMP16]], i64* [[TMP27]], align 8
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP28]], align 8
// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
// CHECK2-NEXT: store i64 [[TMP18]], i64* [[TMP30]], align 8
// CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK2-NEXT: store i64 [[TMP18]], i64* [[TMP32]], align 8
// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK2-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK2-NEXT: [[TMP37:%.*]] = load i16, i16* [[AA]], align 2
// CHECK2-NEXT: store i16 [[TMP37]], i16* [[TMP36]], align 4
// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
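// lin and a fill the remaining captured fields below; the task's shareds and privates are then populated with llvm.memcpy before __kmpc_omp_task enqueues it.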
// CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK2-NEXT: store i32 [[TMP39]], i32* [[TMP38]], align 4
// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: store i32 [[TMP41]], i32* [[TMP40]], align 4
// CHECK2-NEXT: [[TMP42:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK2-NEXT: [[TMP43:%.*]] = bitcast i8* [[TMP42]] to %struct.kmp_task_t_with_privates*
// CHECK2-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP43]], i32 0, i32 0
// CHECK2-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP44]], i32 0, i32 0
// CHECK2-NEXT: [[TMP46:%.*]] = load i8*, i8** [[TMP45]], align 8
// CHECK2-NEXT: [[TMP47:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP46]], i8* align 4 [[TMP47]], i64 12, i1 false)
// CHECK2-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP43]], i32 0, i32 1
// CHECK2-NEXT: [[TMP49:%.*]] = bitcast i8* [[TMP46]] to %struct.anon*
// CHECK2-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 0
// CHECK2-NEXT: [[TMP51:%.*]] = bitcast [3 x i8*]* [[TMP50]] to i8*
// CHECK2-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP34]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP51]], i8* align 8 [[TMP52]], i64 24, i1 false)
// CHECK2-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 1
// CHECK2-NEXT: [[TMP54:%.*]] = bitcast [3 x i8*]* [[TMP53]] to i8*
// CHECK2-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP35]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP54]], i8* align 8 [[TMP55]], i64 24, i1 false)
// CHECK2-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 2
// CHECK2-NEXT: [[TMP57:%.*]] = bitcast [3 x i64]* [[TMP56]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP57]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes to i8*), i64 24, i1 false)
// CHECK2-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 3
// CHECK2-NEXT: [[TMP59:%.*]] = load i16, i16* [[AA]], align 2
// CHECK2-NEXT: store i16 [[TMP59]], i16* [[TMP58]], align 8
// CHECK2-NEXT: [[TMP60:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP42]])
// CHECK2-NEXT: [[TMP61:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32*
// CHECK2-NEXT: store i32 [[TMP61]], i32* [[CONV7]], align 4
// CHECK2-NEXT: [[TMP62:%.*]] = load i64, i64* [[A_CASTED6]], align 8
// CHECK2-NEXT: [[TMP63:%.*]] = load i16, i16* [[AA]], align 2
// CHECK2-NEXT: [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16*
// CHECK2-NEXT: store i16 [[TMP63]], i16* [[CONV9]], align 2
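// With a and aa re-packed into i64 slots, the l146 region below offloads only when n > 10 and otherwise calls the host entry directly.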
// CHECK2-NEXT: [[TMP64:%.*]] = load i64, i64* [[AA_CASTED8]], align 8
// CHECK2-NEXT: [[TMP65:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP65]], 10
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK2: omp_if.then:
// CHECK2-NEXT: [[TMP66:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK2-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64*
// CHECK2-NEXT: store i64 [[TMP62]], i64* [[TMP67]], align 8
// CHECK2-NEXT: [[TMP68:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK2-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64*
// CHECK2-NEXT: store i64 [[TMP62]], i64* [[TMP69]], align 8
// CHECK2-NEXT: [[TMP70:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP70]], align 8
// CHECK2-NEXT: [[TMP71:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1
// CHECK2-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
// CHECK2-NEXT: store i64 [[TMP64]], i64* [[TMP72]], align 8
// CHECK2-NEXT: [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1
// CHECK2-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
// CHECK2-NEXT: store i64 [[TMP64]], i64* [[TMP74]], align 8
// CHECK2-NEXT: [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP75]], align 8
// CHECK2-NEXT: [[TMP76:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK2-NEXT: [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK2-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146.region_id, i32 2, i8** [[TMP76]], i8** [[TMP77]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
// CHECK2-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
// CHECK2: omp_offload.failed13:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i64 [[TMP62]], i64 [[TMP64]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT14]]
// CHECK2: omp_offload.cont14:
// CHECK2-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK2: omp_if.else:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i64 [[TMP62]], i64 [[TMP64]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_IF_END]]
// CHECK2: omp_if.end:
// CHECK2-NEXT: [[TMP80:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: store i32 [[TMP80]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP81:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32*
// CHECK2-NEXT: store i32 [[TMP81]], i32* [[CONV16]], align 4
// CHECK2-NEXT: [[TMP82:%.*]] = load i64, i64* [[A_CASTED15]], align 8
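// For the l170 region, a captured copy of a is packed below and an n > 20 guard selects between a ten-argument offload and the host call.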
// CHECK2-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP83]], i32* [[CONV17]], align 4
// CHECK2-NEXT: [[TMP84:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK2-NEXT: [[TMP85:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP85]], 20
// CHECK2-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]]
// CHECK2: omp_if.then19:
// CHECK2-NEXT: [[TMP86:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK2-NEXT: [[TMP87:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK2-NEXT: [[TMP88:%.*]] = mul nuw i64 [[TMP87]], 8
// CHECK2-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK2-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
// CHECK2-NEXT: store i64 [[TMP82]], i64* [[TMP90]], align 8
// CHECK2-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK2-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64*
// CHECK2-NEXT: store i64 [[TMP82]], i64* [[TMP92]], align 8
// CHECK2-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK2-NEXT: store i64 4, i64* [[TMP93]], align 8
// CHECK2-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP94]], align 8
// CHECK2-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
// CHECK2-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]**
// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 8
// CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
// CHECK2-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to [10 x float]**
// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP98]], align 8
// CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK2-NEXT: store i64 40, i64* [[TMP99]], align 8
// CHECK2-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP100]], align 8
// CHECK2-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
// CHECK2-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP102]], align 8
// CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
// CHECK2-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP104]], align 8
// CHECK2-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK2-NEXT: store i64 8, i64* [[TMP105]], align 8
// CHECK2-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
// CHECK2-NEXT: store i8* null, i8** [[TMP106]], align 8
// CHECK2-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
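// Entries 3 through 9 below map the float VLA, the 5x10 double array, both array extents, the double VLA, the TT struct, and the captured scalar, recording each size in .offload_sizes.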
// CHECK2-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float**
// CHECK2-NEXT: store float* [[VLA]], float** [[TMP108]], align 8
// CHECK2-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
// CHECK2-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to float**
// CHECK2-NEXT: store float* [[VLA]], float** [[TMP110]], align 8
// CHECK2-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK2-NEXT: store i64 [[TMP86]], i64* [[TMP111]], align 8
// CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
// CHECK2-NEXT: store i8* null, i8** [[TMP112]], align 8
// CHECK2-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
// CHECK2-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]**
// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 8
// CHECK2-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
// CHECK2-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]**
// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8
// CHECK2-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK2-NEXT: store i64 400, i64* [[TMP117]], align 8
// CHECK2-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
// CHECK2-NEXT: store i8* null, i8** [[TMP118]], align 8
// CHECK2-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5
// CHECK2-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64*
// CHECK2-NEXT: store i64 5, i64* [[TMP120]], align 8
// CHECK2-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5
// CHECK2-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64*
// CHECK2-NEXT: store i64 5, i64* [[TMP122]], align 8
// CHECK2-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK2-NEXT: store i64 8, i64* [[TMP123]], align 8
// CHECK2-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5
// CHECK2-NEXT: store i8* null, i8** [[TMP124]], align 8
// CHECK2-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6
// CHECK2-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64*
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8
// CHECK2-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6
// CHECK2-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64*
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8
// CHECK2-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK2-NEXT: store i64 8, i64* [[TMP129]], align 8
// CHECK2-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6
// CHECK2-NEXT: store i8* null, i8** [[TMP130]], align 8
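// The remaining entries follow the same base-pointer/pointer/size/mapper pattern before the arrays are handed to the runtime.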
// CHECK2-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7
// CHECK2-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double**
// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP132]], align 8
// CHECK2-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7
// CHECK2-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double**
// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8
// CHECK2-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK2-NEXT: store i64 [[TMP88]], i64* [[TMP135]], align 8
// CHECK2-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7
// CHECK2-NEXT: store i8* null, i8** [[TMP136]], align 8
// CHECK2-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8
// CHECK2-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT**
// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8
// CHECK2-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8
// CHECK2-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to %struct.TT**
// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP140]], align 8
// CHECK2-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK2-NEXT: store i64 16, i64* [[TMP141]], align 8
// CHECK2-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8
// CHECK2-NEXT: store i8* null, i8** [[TMP142]], align 8
// CHECK2-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9
// CHECK2-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64*
// CHECK2-NEXT: store i64 [[TMP84]], i64* [[TMP144]], align 8
// CHECK2-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9
// CHECK2-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64*
// CHECK2-NEXT: store i64 [[TMP84]], i64* [[TMP146]], align 8
// CHECK2-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
// CHECK2-NEXT: store i64 4, i64* [[TMP147]], align 8
// CHECK2-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9
// CHECK2-NEXT: store i8* null, i8** [[TMP148]], align 8
// CHECK2-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK2-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK2-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK2-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0
// CHECK2-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]]
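// Both the offload-failed path and the else branch below call the l170 host entry with the same ten arguments; the function then restores the saved stack and returns.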
// CHECK2: omp_offload.failed23:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT24]]
// CHECK2: omp_offload.cont24:
// CHECK2-NEXT: br label [[OMP_IF_END26:%.*]]
// CHECK2: omp_if.else25:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_IF_END26]]
// CHECK2: omp_if.end26:
// CHECK2-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP155]])
// CHECK2-NEXT: ret i32 [[TMP154]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103
// CHECK2-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
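// After the upper-bound clamp resolves below, this outlined body also exercises __kmpc_cancel and __kmpc_cancellationpoint, with both exits funneling through __kmpc_for_static_fini.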
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK2-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK2-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK2: .cancel.exit:
// CHECK2-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK2: .cancel.continue:
// CHECK2-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK2-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK2-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK2: .cancel.exit2:
// CHECK2-NEXT: br label [[CANCEL_EXIT]]
// CHECK2: .cancel.continue3:
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK2-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK2-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK2: cancel.cont:
// CHECK2-NEXT: ret void
// CHECK2: cancel.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK2-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK2-SAME: (i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
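// The l110 entry re-packs a and k below and forks .omp_outlined..1, which records a linear start for k and uses a dispatch-based schedule.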
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[K_CASTED]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[K1:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK2-NEXT: store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
// CHECK2-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 35, i32 0, i32 8, i32 1, i32 1)
// CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK2: omp.dispatch.cond:
// CHECK2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK2: omp.dispatch.body:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
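// The dispatched inner loop below carries !llvm.access.group metadata, and the linear post-update writes k + 27 back to K_ADDR once the last chunk completes.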
label [[OMP_INNER_FOR_COND:%.*]] 2438 // CHECK2: omp.inner.for.cond: 2439 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 2440 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12 2441 // CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 2442 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2443 // CHECK2: omp.inner.for.body: 2444 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 2445 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 2446 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] 2447 // CHECK2-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !12 2448 // CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !12 2449 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 2450 // CHECK2-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3 2451 // CHECK2-NEXT: [[CONV3:%.*]] = sext i32 [[MUL2]] to i64 2452 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]] 2453 // CHECK2-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !12 2454 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !12 2455 // CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1 2456 // CHECK2-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !12 2457 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2458 // CHECK2: omp.body.continue: 2459 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2460 // CHECK2: omp.inner.for.inc: 2461 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 2462 // CHECK2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 2463 // CHECK2-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 2464 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] 2465 // CHECK2: omp.inner.for.end: 2466 // CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 2467 // CHECK2: omp.dispatch.inc: 2468 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND]] 2469 // CHECK2: omp.dispatch.end: 2470 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2471 // CHECK2-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 2472 // CHECK2-NEXT: br i1 [[TMP13]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 2473 // CHECK2: .omp.linear.pu: 2474 // CHECK2-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8 2475 // CHECK2-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP14]], 27 2476 // CHECK2-NEXT: store i64 [[ADD6]], i64* [[K_ADDR]], align 8 2477 // CHECK2-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 2478 // CHECK2: .omp.linear.pu.done: 2479 // CHECK2-NEXT: ret void 2480 // 2481 // 2482 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138 2483 // CHECK2-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR2]] { 2484 // CHECK2-NEXT: entry: 2485 // CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 2486 // CHECK2-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 2487 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2488 // CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 2489 // CHECK2-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 2490 // CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 2491 // CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 2492 // CHECK2-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 2493 // 
CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2494 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 2495 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 2496 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2497 // CHECK2-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8 2498 // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 2499 // CHECK2-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 2500 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 2501 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 2502 // CHECK2-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* 2503 // CHECK2-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4 2504 // CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 2505 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8 2506 // CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32* 2507 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4 2508 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8 2509 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) 2510 // CHECK2-NEXT: ret void 2511 // 2512 // 2513 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2 2514 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR3]] { 2515 // CHECK2-NEXT: entry: 2516 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2517 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2518 // CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 2519 // CHECK2-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 2520 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2521 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 2522 // CHECK2-NEXT: [[TMP:%.*]] = alloca i64, align 8 2523 // CHECK2-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 2524 // CHECK2-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4 2525 // CHECK2-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 2526 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 2527 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 2528 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 2529 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2530 // CHECK2-NEXT: [[IT:%.*]] = alloca i64, align 8 2531 // CHECK2-NEXT: [[LIN4:%.*]] = alloca i32, align 4 2532 // CHECK2-NEXT: [[A5:%.*]] = alloca i32, align 4 2533 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2534 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2535 // CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 2536 // CHECK2-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 2537 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2538 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 2539 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 2540 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2541 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8 2542 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 2543 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8 2544 // 
CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4 2545 // CHECK2-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 2546 // CHECK2-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 2547 // CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 2548 // CHECK2-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 2549 // CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 2550 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2551 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2552 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 2553 // CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 2554 // CHECK2-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 2555 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 2556 // CHECK2-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 2557 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2558 // CHECK2: cond.true: 2559 // CHECK2-NEXT: br label [[COND_END:%.*]] 2560 // CHECK2: cond.false: 2561 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 2562 // CHECK2-NEXT: br label [[COND_END]] 2563 // CHECK2: cond.end: 2564 // CHECK2-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 2565 // CHECK2-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 2566 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 2567 // CHECK2-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 2568 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2569 // CHECK2: omp.inner.for.cond: 2570 // CHECK2-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 2571 // CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 2572 // CHECK2-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 2573 // CHECK2-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2574 // CHECK2: omp.inner.for.body: 2575 // CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 2576 // CHECK2-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 2577 // CHECK2-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 2578 // CHECK2-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 2579 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 2580 // CHECK2-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64 2581 // CHECK2-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 2582 // CHECK2-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 2583 // CHECK2-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]] 2584 // CHECK2-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]] 2585 // CHECK2-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32 2586 // CHECK2-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4 2587 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4 2588 // CHECK2-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64 2589 // CHECK2-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 2590 // CHECK2-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 2591 // CHECK2-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]] 2592 // CHECK2-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]] 2593 // CHECK2-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32 2594 // CHECK2-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4 2595 // CHECK2-NEXT: [[TMP16:%.*]] = load i16, i16* 
[[CONV]], align 8 2596 // CHECK2-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32 2597 // CHECK2-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1 2598 // CHECK2-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16 2599 // CHECK2-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8 2600 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2601 // CHECK2: omp.body.continue: 2602 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2603 // CHECK2: omp.inner.for.inc: 2604 // CHECK2-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 2605 // CHECK2-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1 2606 // CHECK2-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8 2607 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 2608 // CHECK2: omp.inner.for.end: 2609 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2610 // CHECK2: omp.loop.exit: 2611 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 2612 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2613 // CHECK2-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 2614 // CHECK2-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 2615 // CHECK2: .omp.linear.pu: 2616 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 2617 // CHECK2-NEXT: [[CONV18:%.*]] = sext i32 [[TMP20]] to i64 2618 // CHECK2-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 2619 // CHECK2-NEXT: [[MUL19:%.*]] = mul i64 4, [[TMP21]] 2620 // CHECK2-NEXT: [[ADD20:%.*]] = add i64 [[CONV18]], [[MUL19]] 2621 // CHECK2-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD20]] to i32 2622 // CHECK2-NEXT: store i32 [[CONV21]], i32* [[CONV1]], align 8 2623 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4 2624 // CHECK2-NEXT: [[CONV22:%.*]] = sext i32 [[TMP22]] to i64 2625 // CHECK2-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 2626 // CHECK2-NEXT: [[MUL23:%.*]] = mul i64 4, [[TMP23]] 2627 // CHECK2-NEXT: [[ADD24:%.*]] = add i64 [[CONV22]], [[MUL23]] 2628 // CHECK2-NEXT: [[CONV25:%.*]] = trunc i64 [[ADD24]] to i32 2629 // CHECK2-NEXT: store i32 [[CONV25]], i32* [[CONV2]], align 8 2630 // CHECK2-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 2631 // CHECK2: .omp.linear.pu.done: 2632 // CHECK2-NEXT: ret void 2633 // 2634 // 2635 // CHECK2-LABEL: define {{[^@]+}}@.omp_task_privates_map. 
2636 // CHECK2-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i16** noalias [[TMP1:%.*]], [3 x i8*]** noalias [[TMP2:%.*]], [3 x i8*]** noalias [[TMP3:%.*]], [3 x i64]** noalias [[TMP4:%.*]]) #[[ATTR6:[0-9]+]] { 2637 // CHECK2-NEXT: entry: 2638 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8 2639 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i16**, align 8 2640 // CHECK2-NEXT: [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 8 2641 // CHECK2-NEXT: [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 8 2642 // CHECK2-NEXT: [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 8 2643 // CHECK2-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8 2644 // CHECK2-NEXT: store i16** [[TMP1]], i16*** [[DOTADDR1]], align 8 2645 // CHECK2-NEXT: store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 8 2646 // CHECK2-NEXT: store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 8 2647 // CHECK2-NEXT: store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 8 2648 // CHECK2-NEXT: [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8 2649 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0 2650 // CHECK2-NEXT: [[TMP7:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 8 2651 // CHECK2-NEXT: store [3 x i8*]* [[TMP6]], [3 x i8*]** [[TMP7]], align 8 2652 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1 2653 // CHECK2-NEXT: [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 8 2654 // CHECK2-NEXT: store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 8 2655 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2 2656 // CHECK2-NEXT: [[TMP11:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 8 2657 // CHECK2-NEXT: store [3 x i64]* [[TMP10]], [3 x i64]** [[TMP11]], align 8 2658 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3 2659 // CHECK2-NEXT: [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 8 2660 // CHECK2-NEXT: store i16* [[TMP12]], i16** [[TMP13]], align 8 2661 // CHECK2-NEXT: ret void 2662 // 2663 // 2664 // CHECK2-LABEL: define {{[^@]+}}@.omp_task_entry. 
2665 // CHECK2-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] { 2666 // CHECK2-NEXT: entry: 2667 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4 2668 // CHECK2-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8 2669 // CHECK2-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8 2670 // CHECK2-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8 2671 // CHECK2-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8 2672 // CHECK2-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8 2673 // CHECK2-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 8 2674 // CHECK2-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 8 2675 // CHECK2-NEXT: [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 8 2676 // CHECK2-NEXT: [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 8 2677 // CHECK2-NEXT: [[AA_CASTED_I:%.*]] = alloca i64, align 8 2678 // CHECK2-NEXT: [[LIN_CASTED_I:%.*]] = alloca i64, align 8 2679 // CHECK2-NEXT: [[A_CASTED_I:%.*]] = alloca i64, align 8 2680 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4 2681 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8 2682 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4 2683 // CHECK2-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 2684 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4 2685 // CHECK2-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 2686 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0 2687 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2 2688 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0 2689 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 2690 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon* 2691 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1 2692 // CHECK2-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8* 2693 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8* 2694 // CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]]) 2695 // CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) 2696 // CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) 2697 // CHECK2-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) 2698 // CHECK2-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24 2699 // CHECK2-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !24 2700 // CHECK2-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 2701 // CHECK2-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. 
to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 2702 // CHECK2-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !24 2703 // CHECK2-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !24 2704 // CHECK2-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !24 2705 // CHECK2-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 2706 // CHECK2-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 2707 // CHECK2-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* 2708 // CHECK2-NEXT: call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] 2709 // CHECK2-NEXT: [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !24 2710 // CHECK2-NEXT: [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !24 2711 // CHECK2-NEXT: [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !24 2712 // CHECK2-NEXT: [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !24 2713 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i64 0, i64 0 2714 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i64 0, i64 0 2715 // CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i64 0, i64 0 2716 // CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1 2717 // CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2 2718 // CHECK2-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]] 2719 // CHECK2-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 2720 // CHECK2-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] 2721 // CHECK2: omp_offload.failed.i: 2722 // CHECK2-NEXT: [[TMP27:%.*]] = load i16, i16* [[TMP16]], align 2 2723 // CHECK2-NEXT: [[CONV_I:%.*]] = bitcast i64* [[AA_CASTED_I]] to i16* 2724 // CHECK2-NEXT: store i16 [[TMP27]], i16* [[CONV_I]], align 2, !noalias !24 2725 // CHECK2-NEXT: [[TMP28:%.*]] = load i64, i64* [[AA_CASTED_I]], align 8, !noalias !24 2726 // CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP23]], align 4 2727 // CHECK2-NEXT: [[CONV4_I:%.*]] = bitcast i64* [[LIN_CASTED_I]] to i32* 2728 // CHECK2-NEXT: store i32 [[TMP29]], i32* [[CONV4_I]], align 4, !noalias !24 2729 // CHECK2-NEXT: [[TMP30:%.*]] = load i64, i64* [[LIN_CASTED_I]], align 8, !noalias !24 2730 // CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP24]], align 4 2731 // CHECK2-NEXT: [[CONV5_I:%.*]] = bitcast i64* [[A_CASTED_I]] to i32* 2732 // CHECK2-NEXT: store i32 [[TMP31]], i32* [[CONV5_I]], align 4, !noalias !24 2733 // CHECK2-NEXT: [[TMP32:%.*]] = load i64, i64* 
[[A_CASTED_I]], align 8, !noalias !24 2734 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138(i64 [[TMP28]], i64 [[TMP30]], i64 [[TMP32]]) #[[ATTR4]] 2735 // CHECK2-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] 2736 // CHECK2: .omp_outlined..3.exit: 2737 // CHECK2-NEXT: ret i32 0 2738 // 2739 // 2740 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146 2741 // CHECK2-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] { 2742 // CHECK2-NEXT: entry: 2743 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2744 // CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 2745 // CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 2746 // CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 2747 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2748 // CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 2749 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2750 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 2751 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8 2752 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 2753 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4 2754 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 2755 // CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8 2756 // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 2757 // CHECK2-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2 2758 // CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 2759 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) 2760 // CHECK2-NEXT: ret void 2761 // 2762 // 2763 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4 2764 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR3]] { 2765 // CHECK2-NEXT: entry: 2766 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2767 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2768 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2769 // CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 2770 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2771 // CHECK2-NEXT: [[TMP:%.*]] = alloca i16, align 2 2772 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2773 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2774 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2775 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2776 // CHECK2-NEXT: [[IT:%.*]] = alloca i16, align 2 2777 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2778 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2779 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2780 // CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 2781 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2782 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 2783 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2784 // CHECK2-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 2785 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2786 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2787 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, 
i32** [[DOTGLOBAL_TID__ADDR]], align 8 2788 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 2789 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2790 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2791 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 2792 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2793 // CHECK2: cond.true: 2794 // CHECK2-NEXT: br label [[COND_END:%.*]] 2795 // CHECK2: cond.false: 2796 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2797 // CHECK2-NEXT: br label [[COND_END]] 2798 // CHECK2: cond.end: 2799 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 2800 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2801 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2802 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 2803 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2804 // CHECK2: omp.inner.for.cond: 2805 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2806 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2807 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 2808 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2809 // CHECK2: omp.inner.for.body: 2810 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2811 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 2812 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 2813 // CHECK2-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 2814 // CHECK2-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2 2815 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8 2816 // CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 2817 // CHECK2-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8 2818 // CHECK2-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8 2819 // CHECK2-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32 2820 // CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1 2821 // CHECK2-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16 2822 // CHECK2-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8 2823 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2824 // CHECK2: omp.body.continue: 2825 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2826 // CHECK2: omp.inner.for.inc: 2827 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2828 // CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1 2829 // CHECK2-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 2830 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 2831 // CHECK2: omp.inner.for.end: 2832 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2833 // CHECK2: omp.loop.exit: 2834 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 2835 // CHECK2-NEXT: ret void 2836 // 2837 // 2838 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170 2839 // CHECK2-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], 
i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { 2840 // CHECK2-NEXT: entry: 2841 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2842 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 2843 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 2844 // CHECK2-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 2845 // CHECK2-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 2846 // CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 2847 // CHECK2-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 2848 // CHECK2-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 2849 // CHECK2-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 2850 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 2851 // CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 2852 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 2853 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2854 // CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 2855 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 2856 // CHECK2-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 2857 // CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 2858 // CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 2859 // CHECK2-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 2860 // CHECK2-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 2861 // CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 2862 // CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 2863 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2864 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 2865 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 2866 // CHECK2-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 2867 // CHECK2-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 2868 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 2869 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 2870 // CHECK2-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 2871 // CHECK2-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 2872 // CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 2873 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8 2874 // CHECK2-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32* 2875 // CHECK2-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4 2876 // CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8 2877 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8 2878 // CHECK2-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 2879 // CHECK2-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4 2880 // CHECK2-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 2881 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]]) 2882 // CHECK2-NEXT: ret void 2883 // 2884 // 2885 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7 2886 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { 2887 // CHECK2-NEXT: entry: 2888 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2889 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2890 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 2891 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 2892 // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 2893 // CHECK2-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 2894 // CHECK2-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 2895 // CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 2896 // CHECK2-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 2897 // CHECK2-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 2898 // CHECK2-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 2899 // CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 2900 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2901 // CHECK2-NEXT: [[TMP:%.*]] = alloca i8, align 1 2902 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2903 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2904 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2905 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2906 // CHECK2-NEXT: [[IT:%.*]] = alloca i8, align 1 2907 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2908 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2909 // CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 2910 // CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 2911 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 2912 // CHECK2-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 2913 // CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 2914 // CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 2915 // CHECK2-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 2916 // CHECK2-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 2917 // CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 2918 // CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 2919 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 2920 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 2921 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 2922 // CHECK2-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 2923 // CHECK2-NEXT: 
[[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 2924 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 2925 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 2926 // CHECK2-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 2927 // CHECK2-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 2928 // CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 2929 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2930 // CHECK2-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 2931 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2932 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2933 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8 2934 // CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2935 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 2936 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 2937 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 2938 // CHECK2: omp.dispatch.cond: 2939 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2940 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 2941 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2942 // CHECK2: cond.true: 2943 // CHECK2-NEXT: br label [[COND_END:%.*]] 2944 // CHECK2: cond.false: 2945 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2946 // CHECK2-NEXT: br label [[COND_END]] 2947 // CHECK2: cond.end: 2948 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 2949 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2950 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2951 // CHECK2-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 2952 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2953 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2954 // CHECK2-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 2955 // CHECK2-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 2956 // CHECK2: omp.dispatch.body: 2957 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2958 // CHECK2: omp.inner.for.cond: 2959 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2960 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2961 // CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 2962 // CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2963 // CHECK2: omp.inner.for.body: 2964 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2965 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 2966 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 2967 // CHECK2-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8 2968 // CHECK2-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1 2969 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8 2970 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 2971 // CHECK2-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8 2972 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2 2973 // CHECK2-NEXT: [[TMP20:%.*]] = load float, float* 
[[ARRAYIDX]], align 4 2974 // CHECK2-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double 2975 // CHECK2-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00 2976 // CHECK2-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float 2977 // CHECK2-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4 2978 // CHECK2-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3 2979 // CHECK2-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4 2980 // CHECK2-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double 2981 // CHECK2-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00 2982 // CHECK2-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float 2983 // CHECK2-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4 2984 // CHECK2-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1 2985 // CHECK2-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2 2986 // CHECK2-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8 2987 // CHECK2-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00 2988 // CHECK2-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8 2989 // CHECK2-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] 2990 // CHECK2-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]] 2991 // CHECK2-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3 2992 // CHECK2-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8 2993 // CHECK2-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00 2994 // CHECK2-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8 2995 // CHECK2-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 2996 // CHECK2-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8 2997 // CHECK2-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 2998 // CHECK2-NEXT: store i64 [[ADD22]], i64* [[X]], align 8 2999 // CHECK2-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 3000 // CHECK2-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8 3001 // CHECK2-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 3002 // CHECK2-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 3003 // CHECK2-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 3004 // CHECK2-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8 3005 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3006 // CHECK2: omp.body.continue: 3007 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3008 // CHECK2: omp.inner.for.inc: 3009 // CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3010 // CHECK2-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 3011 // CHECK2-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4 3012 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 3013 // CHECK2: omp.inner.for.end: 3014 // CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 3015 // CHECK2: omp.dispatch.inc: 3016 // CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3017 // CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 3018 // CHECK2-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 3019 // CHECK2-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4 3020 // CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3021 // CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 3022 // CHECK2-NEXT: [[ADD28:%.*]] = add nsw i32 
[[TMP30]], [[TMP31]] 3023 // CHECK2-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4 3024 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND]] 3025 // CHECK2: omp.dispatch.end: 3026 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 3027 // CHECK2-NEXT: ret void 3028 // 3029 // 3030 // CHECK2-LABEL: define {{[^@]+}}@_Z3bari 3031 // CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 3032 // CHECK2-NEXT: entry: 3033 // CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3034 // CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4 3035 // CHECK2-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 3036 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3037 // CHECK2-NEXT: store i32 0, i32* [[A]], align 4 3038 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3039 // CHECK2-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]]) 3040 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 3041 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 3042 // CHECK2-NEXT: store i32 [[ADD]], i32* [[A]], align 4 3043 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 3044 // CHECK2-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]]) 3045 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 3046 // CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 3047 // CHECK2-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 3048 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 3049 // CHECK2-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]]) 3050 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 3051 // CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 3052 // CHECK2-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 3053 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 3054 // CHECK2-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]]) 3055 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 3056 // CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 3057 // CHECK2-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 3058 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 3059 // CHECK2-NEXT: ret i32 [[TMP8]] 3060 // 3061 // 3062 // CHECK2-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 3063 // CHECK2-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 3064 // CHECK2-NEXT: entry: 3065 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 3066 // CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3067 // CHECK2-NEXT: [[B:%.*]] = alloca i32, align 4 3068 // CHECK2-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 3069 // CHECK2-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 3070 // CHECK2-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 3071 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 3072 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 3073 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 3074 // CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 3075 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 3076 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3077 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 3078 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 3079 // CHECK2-NEXT: 
[[ADD:%.*]] = add nsw i32 [[TMP0]], 1 3080 // CHECK2-NEXT: store i32 [[ADD]], i32* [[B]], align 4 3081 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 3082 // CHECK2-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 3083 // CHECK2-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 3084 // CHECK2-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 3085 // CHECK2-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] 3086 // CHECK2-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 3087 // CHECK2-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 3088 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4 3089 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32* 3090 // CHECK2-NEXT: store i32 [[TMP5]], i32* [[CONV]], align 4 3091 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 3092 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4 3093 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 3094 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 3095 // CHECK2: omp_if.then: 3096 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 3097 // CHECK2-NEXT: [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]] 3098 // CHECK2-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2 3099 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3100 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1** 3101 // CHECK2-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8 3102 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3103 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** 3104 // CHECK2-NEXT: store double* [[A]], double** [[TMP13]], align 8 3105 // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 3106 // CHECK2-NEXT: store i64 8, i64* [[TMP14]], align 8 3107 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 3108 // CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 8 3109 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 3110 // CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 3111 // CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 3112 // CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 3113 // CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* 3114 // CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 3115 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 3116 // CHECK2-NEXT: store i64 4, i64* [[TMP20]], align 8 3117 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 3118 // CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8 3119 // CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 3120 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* 3121 // CHECK2-NEXT: store i64 2, i64* [[TMP23]], align 8 3122 // CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 3123 // CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* 3124 // CHECK2-NEXT: store i64 2, i64* [[TMP25]], align 8 
3125 // CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 3126 // CHECK2-NEXT: store i64 8, i64* [[TMP26]], align 8 3127 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 3128 // CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 8 3129 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 3130 // CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* 3131 // CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 3132 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 3133 // CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* 3134 // CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 3135 // CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 3136 // CHECK2-NEXT: store i64 8, i64* [[TMP32]], align 8 3137 // CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 3138 // CHECK2-NEXT: store i8* null, i8** [[TMP33]], align 8 3139 // CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 3140 // CHECK2-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** 3141 // CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 3142 // CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 3143 // CHECK2-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** 3144 // CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 3145 // CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 3146 // CHECK2-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 3147 // CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 3148 // CHECK2-NEXT: store i8* null, i8** [[TMP39]], align 8 3149 // CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3150 // CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3151 // CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 3152 // CHECK2-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 3153 // CHECK2-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 3154 // CHECK2-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 3155 // CHECK2: omp_offload.failed: 3156 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] 3157 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 3158 // CHECK2: omp_offload.cont: 3159 // CHECK2-NEXT: br label [[OMP_IF_END:%.*]] 3160 // CHECK2: omp_if.else: 3161 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] 3162 // CHECK2-NEXT: br label [[OMP_IF_END]] 
3163 // CHECK2: omp_if.end: 3164 // CHECK2-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] 3165 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] 3166 // CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 3167 // CHECK2-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 3168 // CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 3169 // CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 3170 // CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] 3171 // CHECK2-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 3172 // CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) 3173 // CHECK2-NEXT: ret i32 [[ADD4]] 3174 // 3175 // 3176 // CHECK2-LABEL: define {{[^@]+}}@_ZL7fstatici 3177 // CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 3178 // CHECK2-NEXT: entry: 3179 // CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 3180 // CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4 3181 // CHECK2-NEXT: [[AA:%.*]] = alloca i16, align 2 3182 // CHECK2-NEXT: [[AAA:%.*]] = alloca i8, align 1 3183 // CHECK2-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 3184 // CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 3185 // CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 3186 // CHECK2-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 3187 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8 3188 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8 3189 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8 3190 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 3191 // CHECK2-NEXT: store i32 0, i32* [[A]], align 4 3192 // CHECK2-NEXT: store i16 0, i16* [[AA]], align 2 3193 // CHECK2-NEXT: store i8 0, i8* [[AAA]], align 1 3194 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 3195 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 3196 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4 3197 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 3198 // CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 3199 // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 3200 // CHECK2-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 3201 // CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 3202 // CHECK2-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1 3203 // CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* 3204 // CHECK2-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1 3205 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 3206 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 3207 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 3208 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 3209 // CHECK2: omp_if.then: 3210 // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3211 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64* 3212 // CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8 3213 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3214 // CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64* 3215 // CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8 3216 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 3217 // CHECK2-NEXT: store i8* null, i8** [[TMP11]], align 8 
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8
// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP16]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK2-NEXT: store i8* null, i8** [[TMP26]], align 8
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK2-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK2: omp_if.else:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_IF_END]]
// CHECK2: omp_if.end:
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: ret i32 [[TMP31]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK2-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK2-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK2-NEXT: store i32 0, i32* [[A]], align 4
// CHECK2-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK2: omp_if.then:
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK2-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK2-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK2-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK2-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK2: omp_if.else:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK2-NEXT: br label [[OMP_IF_END]]
// CHECK2: omp_if.end:
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK2-NEXT: ret i32 [[TMP24]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242
// CHECK2-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK2-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK2-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK2-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK2-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK2-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK2-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK2-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK2-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8
// CHECK2-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK2-NEXT: store double [[INC]], double* [[A5]], align 8
// CHECK2-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16
// CHECK2-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
// CHECK2-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK2-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1
// CHECK2-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224
// CHECK2-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK2-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK2-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207
// CHECK2-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK2-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK2-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK2-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK2-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK2-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK2-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK2-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK2-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8
// CHECK2-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK2-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
// CHECK2-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK2-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK2-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK2-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK2-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK2-SAME: () #[[ATTR6]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK2-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: ret i64 0
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK3-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK3-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK3-NEXT: [[A_CASTED3:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED4:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_CASTED11:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK3-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
// CHECK3-NEXT: br i1 [[TMP6]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103() #[[ATTR4:[0-9]+]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK3-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP7]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i32 [[TMP8]], i64* [[K]]) #[[ATTR4]]
// CHECK3-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP9]], i16* [[CONV]], align 2
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK3-NEXT: store i32 [[TMP11]], i32* [[LIN_CASTED]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP13]], i32* [[A_CASTED2]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32*
// CHECK3-NEXT: store i32 [[TMP10]], i32* [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK3-NEXT: store i32 [[TMP10]], i32* [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32*
// CHECK3-NEXT: store i32 [[TMP12]], i32* [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK3-NEXT: store i32 [[TMP12]], i32* [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP24]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
// CHECK3-NEXT: store i32 [[TMP14]], i32* [[TMP26]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK3-NEXT: store i32 [[TMP14]], i32* [[TMP28]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP29]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK3-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: store i16 [[TMP33]], i16* [[TMP32]], align 4
// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK3-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK3-NEXT: store i32 [[TMP35]], i32* [[TMP34]], align 4
// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK3-NEXT: [[TMP37:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP37]], i32* [[TMP36]], align 4
// CHECK3-NEXT: [[TMP38:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK3-NEXT: [[TMP39:%.*]] = bitcast i8* [[TMP38]] to %struct.kmp_task_t_with_privates*
// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 0
// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP40]], i32 0, i32 0
// CHECK3-NEXT: [[TMP42:%.*]] = load i8*, i8** [[TMP41]], align 4
// CHECK3-NEXT: [[TMP43:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i32 12, i1 false)
// CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 1
// CHECK3-NEXT: [[TMP45:%.*]] = bitcast i8* [[TMP42]] to %struct.anon*
// CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 0
// CHECK3-NEXT: [[TMP47:%.*]] = bitcast [3 x i64]* [[TMP46]] to i8*
// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP47]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes to i8*), i32 24, i1 false)
// CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 1
// CHECK3-NEXT: [[TMP49:%.*]] = bitcast [3 x i8*]* [[TMP48]] to i8*
// CHECK3-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP30]] to i8*
// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP49]], i8* align 4 [[TMP50]], i32 12, i1 false)
// CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 2
// CHECK3-NEXT: [[TMP52:%.*]] = bitcast [3 x i8*]* [[TMP51]] to i8*
// CHECK3-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP31]] to i8*
// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP52]], i8* align 4 [[TMP53]], i32 12, i1 false)
// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 3
// CHECK3-NEXT: [[TMP55:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: store i16 [[TMP55]], i16* [[TMP54]], align 4
// CHECK3-NEXT: [[TMP56:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP38]])
// CHECK3-NEXT: [[TMP57:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP57]], i32* [[A_CASTED3]], align 4
// CHECK3-NEXT: [[TMP58:%.*]] = load i32, i32* [[A_CASTED3]], align 4
// CHECK3-NEXT: [[TMP59:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
// CHECK3-NEXT: store i16 [[TMP59]], i16* [[CONV5]], align 2
// CHECK3-NEXT: [[TMP60:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
// CHECK3-NEXT: [[TMP61:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP61]], 10
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[TMP62:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32*
// CHECK3-NEXT: store i32 [[TMP58]], i32* [[TMP63]], align 4
// CHECK3-NEXT: [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32*
// CHECK3-NEXT: store i32 [[TMP58]], i32* [[TMP65]], align 4
// CHECK3-NEXT: [[TMP66:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP66]], align 4
// CHECK3-NEXT: [[TMP67:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
// CHECK3-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32*
// CHECK3-NEXT: store i32 [[TMP60]], i32* [[TMP68]], align 4
// CHECK3-NEXT: [[TMP69:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
// CHECK3-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32*
// CHECK3-NEXT: store i32 [[TMP60]], i32* [[TMP70]], align 4
// CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP71]], align 4
// CHECK3-NEXT: [[TMP72:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK3-NEXT: [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP74:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146.region_id, i32 2, i8** [[TMP72]], i8** [[TMP73]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP75:%.*]] = icmp ne i32 [[TMP74]], 0
// CHECK3-NEXT: br i1 [[TMP75]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
// CHECK3: omp_offload.failed9:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i32 [[TMP58]], i32 [[TMP60]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT10]]
// CHECK3: omp_offload.cont10:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i32 [[TMP58]], i32 [[TMP60]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: [[TMP76:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP76]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[TMP77:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP77]], i32* [[A_CASTED11]], align 4
// CHECK3-NEXT: [[TMP78:%.*]] = load i32, i32* [[A_CASTED11]], align 4
// CHECK3-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: store i32 [[TMP79]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK3-NEXT: [[TMP80:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK3-NEXT: [[TMP81:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP81]], 20
// CHECK3-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]]
// CHECK3: omp_if.then13:
// CHECK3-NEXT: [[TMP82:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK3-NEXT: [[TMP83:%.*]] = sext i32 [[TMP82]] to i64
// CHECK3-NEXT: [[TMP84:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK3-NEXT: [[TMP85:%.*]] = mul nuw i32 [[TMP84]], 8
// CHECK3-NEXT: [[TMP86:%.*]] = sext i32 [[TMP85]] to i64
// CHECK3-NEXT: [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK3-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32*
// CHECK3-NEXT: store i32 [[TMP78]], i32* [[TMP88]], align 4
// CHECK3-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK3-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32*
// CHECK3-NEXT: store i32 [[TMP78]], i32* [[TMP90]], align 4
// CHECK3-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK3-NEXT: store i64 4, i64* [[TMP91]], align 4
// CHECK3-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP92]], align 4
// CHECK3-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1
// CHECK3-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to [10 x float]**
// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP94]], align 4
// CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1
// CHECK3-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]**
// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 4
// CHECK3-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK3-NEXT: store i64 40, i64* [[TMP97]], align 4
// CHECK3-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP98]], align 4
// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2
// CHECK3-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP100]], align 4
// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2
// CHECK3-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP102]], align 4
// CHECK3-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK3-NEXT: store i64 4, i64* [[TMP103]], align 4
// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP104]], align 4
// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3
// CHECK3-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to float**
// CHECK3-NEXT: store float* [[VLA]], float** [[TMP106]], align 4
// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3
// CHECK3-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float**
// CHECK3-NEXT: store float* [[VLA]], float** [[TMP108]], align 4
// CHECK3-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK3-NEXT: store i64 [[TMP83]], i64* [[TMP109]], align 4
// CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3
// CHECK3-NEXT: store i8* null, i8** [[TMP110]], align 4
// CHECK3-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4
// CHECK3-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]**
// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 4
// CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4
// CHECK3-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]**
// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 4
// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK3-NEXT: store i64 400, i64* [[TMP115]], align 4
// CHECK3-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4
// CHECK3-NEXT: store i8* null, i8** [[TMP116]], align 4
// CHECK3-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5
// CHECK3-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32*
// CHECK3-NEXT: store i32 5, i32* [[TMP118]], align 4
// CHECK3-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5
// CHECK3-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32*
// CHECK3-NEXT: store i32 5, i32* [[TMP120]], align 4
// CHECK3-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK3-NEXT: store i64 4, i64* [[TMP121]], align 4
// CHECK3-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5
// CHECK3-NEXT: store i8* null, i8** [[TMP122]], align 4
// CHECK3-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6
// CHECK3-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP124]], align 4
// CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6
// CHECK3-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4
// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK3-NEXT: store i64 4, i64* [[TMP127]], align 4
// CHECK3-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6
// CHECK3-NEXT: store i8* null, i8** [[TMP128]], align 4
// CHECK3-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7
// CHECK3-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to double**
// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP130]], align 4
// CHECK3-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7
// CHECK3-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double**
// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP132]], align 4
// CHECK3-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK3-NEXT: store i64 [[TMP86]], i64* [[TMP133]], align 4
// CHECK3-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7
// CHECK3-NEXT: store i8* null, i8** [[TMP134]], align 4
// CHECK3-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8
// CHECK3-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT**
// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4
// CHECK3-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8
// CHECK3-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT**
// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4
// CHECK3-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK3-NEXT: store i64 12, i64* [[TMP139]], align 4
// CHECK3-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8
// CHECK3-NEXT: store i8* null, i8** [[TMP140]], align 4
// CHECK3-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9
// CHECK3-NEXT: [[TMP142:%.*]] = bitcast i8** [[TMP141]] to i32*
// CHECK3-NEXT: store i32 [[TMP80]], i32* [[TMP142]], align 4
// CHECK3-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9
// CHECK3-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32*
// CHECK3-NEXT: store i32 [[TMP80]], i32* [[TMP144]], align 4
// CHECK3-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
// CHECK3-NEXT: store i64 4, i64* [[TMP145]], align 4
// CHECK3-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9
// CHECK3-NEXT: store i8* null, i8** [[TMP146]], align 4
// CHECK3-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK3-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK3-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK3-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP147]], i8** [[TMP148]], i64* [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0
// CHECK3-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK3: omp_offload.failed17:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK3: omp_offload.cont18:
// CHECK3-NEXT: br label [[OMP_IF_END20:%.*]]
// CHECK3: omp_if.else19:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_IF_END20]]
// CHECK3: omp_if.end20:
// CHECK3-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP153]])
// CHECK3-NEXT: ret i32 [[TMP152]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103
// CHECK3-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK3-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK3-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK3: .cancel.exit:
// CHECK3-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK3: .cancel.continue:
// CHECK3-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK3-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK3-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK3: .cancel.exit2:
// CHECK3-NEXT: br label [[CANCEL_EXIT]]
// CHECK3: .cancel.continue3:
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK3-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK3: cancel.cont:
// CHECK3-NEXT: ret void
// CHECK3: cancel.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK3-NEXT: br label [[CANCEL_CONT]]
//
//
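// NOTE (manually added; not part of the autogenerated assertions): the two
// runtime calls above use cancellation kind 2, which is the worksharing-loop
// kind in the runtime's kmp_cancel_kind_t enum. A nonzero return from
// __kmpc_cancel or __kmpc_cancellationpoint means cancellation was activated,
// so control branches to cancel.exit, which still runs __kmpc_for_static_fini
// before rejoining cancel.cont. A minimal C++ sketch of a loop with this
// shape (illustrative only, assumed rather than the test's actual source):
//
//   #pragma omp for
//   for (int i = 3; i < 32; i += 5) { // 6 iterations; hence OMP_UB = 5
//     #pragma omp cancel for
//     #pragma omp cancellation point for
//   }
//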
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK3-SAME: (i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[K1:%.*]] = alloca i64, align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
// CHECK3-NEXT: store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK3-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 35, i32 0, i32 8, i32 1, i32 1)
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
// CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK3-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !13
// CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !13
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK3-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3
// CHECK3-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]]
// CHECK3-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !13
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !13
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !13
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK3: .omp.linear.pu:
// CHECK3-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP15]], 27
// CHECK3-NEXT: store i64 [[ADD5]], i64* [[TMP0]], align 8
// CHECK3-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK3: .omp.linear.pu.done:
// CHECK3-NEXT: ret void
//
//
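// NOTE (manually added; not part of the autogenerated assertions):
// __kmpc_dispatch_init_4 above receives schedule kind 35, which is
// kmp_sch_dynamic_chunked in the runtime's sched_type enum, so the loop body
// is driven by a __kmpc_dispatch_next_4 polling loop instead of a single
// static chunk. The linear variable k is privatized into K1 and rematerialized
// each iteration as start + iv*3; with OMP_UB = 8 there are 9 iterations, so
// the .omp.linear.pu block writes start + 27 (9 * step 3) back through the
// original pointer.
//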
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138
// CHECK3-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK3-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[LIN2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A3:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK3-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK3-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK3-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK3-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK3-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK3-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK3-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK3-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK3-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK3-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK3-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK3-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK3-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK3-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK3-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK3-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK3-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK3-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK3-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK3-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK3-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK3-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK3-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK3-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK3: .omp.linear.pu:
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK3-NEXT: [[CONV16:%.*]] = sext i32 [[TMP20]] to i64
// CHECK3-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK3-NEXT: [[MUL17:%.*]] = mul i64 4, [[TMP21]]
// CHECK3-NEXT: [[ADD18:%.*]] = add i64 [[CONV16]], [[MUL17]]
// CHECK3-NEXT: [[CONV19:%.*]] = trunc i64 [[ADD18]] to i32
// CHECK3-NEXT: store i32 [[CONV19]], i32* [[LIN_ADDR]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK3-NEXT: [[CONV20:%.*]] = sext i32 [[TMP22]] to i64
// CHECK3-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK3-NEXT: [[MUL21:%.*]] = mul i64 4, [[TMP23]]
// CHECK3-NEXT: [[ADD22:%.*]] = add i64 [[CONV20]], [[MUL21]]
// CHECK3-NEXT: [[CONV23:%.*]] = trunc i64 [[ADD22]] to i32
// CHECK3-NEXT: store i32 [[CONV23]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK3: .omp.linear.pu.done:
// CHECK3-NEXT: ret void
//
//
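// NOTE (manually added; not part of the autogenerated assertions): in
// .omp_outlined..2 the linear step is a runtime value fetched once from
// _Z7get_valv() and cached in .linear.step; each iteration rematerializes
// lin and a as start + iv*step. The iteration variable is 64-bit unsigned
// (__kmpc_for_static_init_8u with icmp ugt/ule) and OMP_UB = 3 gives 4
// iterations, so the .omp.linear.pu write-back computes start + 4*step for
// both variables.
//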
// CHECK3-LABEL: define {{[^@]+}}@.omp_task_privates_map.
// CHECK3-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i16** noalias [[TMP1:%.*]], [3 x i8*]** noalias [[TMP2:%.*]], [3 x i8*]** noalias [[TMP3:%.*]], [3 x i64]** noalias [[TMP4:%.*]]) #[[ATTR6:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i16**, align 4
// CHECK3-NEXT: [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 4
// CHECK3-NEXT: [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 4
// CHECK3-NEXT: [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 4
// CHECK3-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 4
// CHECK3-NEXT: store i16** [[TMP1]], i16*** [[DOTADDR1]], align 4
// CHECK3-NEXT: store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 4
// CHECK3-NEXT: store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 4
// CHECK3-NEXT: store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 4
// CHECK3-NEXT: store [3 x i64]* [[TMP6]], [3 x i64]** [[TMP7]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
// CHECK3-NEXT: [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 4
// CHECK3-NEXT: store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
// CHECK3-NEXT: [[TMP11:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 4
// CHECK3-NEXT: store [3 x i8*]* [[TMP10]], [3 x i8*]** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
// CHECK3-NEXT: [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 4
// CHECK3-NEXT: store i16* [[TMP12]], i16** [[TMP13]], align 4
// CHECK3-NEXT: ret void
//
//
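// NOTE (manually added; not part of the autogenerated assertions):
// .omp_task_privates_map. is the privates "map" thunk whose address is stored
// into the task below through the copy-fn slot: given the task's
// kmp_privates.t block and one out-pointer per private, it hands back the
// address of each field (the [3 x i64] sizes array, the two [3 x i8*] offload
// pointer arrays, and the firstprivate i16 aa) so the task entry can reach
// them without knowing the struct layout.
//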
// CHECK3-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK3-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK3-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK3-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 4
// CHECK3-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 4
// CHECK3-NEXT: [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 4
// CHECK3-NEXT: [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 4
// CHECK3-NEXT: [[AA_CASTED_I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[LIN_CASTED_I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_CASTED_I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK3-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
// CHECK3-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
// CHECK3-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !25
// CHECK3-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25
// CHECK3-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25
// CHECK3-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !25
// CHECK3-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
// CHECK3-NEXT: call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
// CHECK3-NEXT: [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i32 0, i32 0
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i32 0, i32 0
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i32 0, i32 0
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK3-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK3-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK3-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]]
// CHECK3: omp_offload.failed.i:
// CHECK3-NEXT: [[TMP27:%.*]] = load i16, i16* [[TMP16]], align 2
// CHECK3-NEXT: [[CONV_I:%.*]] = bitcast i32* [[AA_CASTED_I]] to i16*
// CHECK3-NEXT: store i16 [[TMP27]], i16* [[CONV_I]], align 2, !noalias !25
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[AA_CASTED_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK3-NEXT: store i32 [[TMP29]], i32* [[LIN_CASTED_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP30:%.*]] = load i32, i32* [[LIN_CASTED_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP24]], align 4
// CHECK3-NEXT: store i32 [[TMP31]], i32* [[A_CASTED_I]], align 4, !noalias !25
// CHECK3-NEXT: [[TMP32:%.*]] = load i32, i32* [[A_CASTED_I]], align 4, !noalias !25
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138(i32 [[TMP28]], i32 [[TMP30]], i32 [[TMP32]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]]
// CHECK3: .omp_outlined..3.exit:
// CHECK3-NEXT: ret i32 0
//
//
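// NOTE (manually added; not part of the autogenerated assertions):
// .omp_task_entry. implements the nowait target region: it unpacks the
// shareds (%struct.anon) and the privates via the copy-fn thunk, then calls
// __tgt_target_teams_nowait_mapper. A nonzero return means the region did not
// run on a device, so omp_offload.failed.i re-marshals aa/lin/a into
// i32-sized slots and invokes the host fallback version of the l138 entry
// point. The !noalias !25 metadata ties the inlined task body's allocas to
// the noalias scopes declared just above it.
//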
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146
// CHECK3-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK3-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK3-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK3-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK3-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK3-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK3-NEXT: ret void
//
//
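// NOTE (manually added; not part of the autogenerated assertions): in
// .omp_outlined..4 above, the i16 loop variable IT is computed in 32-bit
// arithmetic and truncated on the store (trunc i32 to i16), and aa is read
// through a bitcast of its i32 argument slot, widened with sext, incremented,
// and truncated back. With OMP_UB = 3 the loop runs 4 iterations, matching
// IT = 6 + 4*iv, i.e. IT in {6, 10, 14, 18}.
//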
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170
// CHECK3-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK3-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK3-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK3-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK3: omp.dispatch.cond:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK3: omp.dispatch.body:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK3-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK3-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8
// CHECK3-NEXT: store i8 [[CONV]], i8* [[IT]], align 1
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK3-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK3-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double
// CHECK3-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
// CHECK3-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
// CHECK3-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK3-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4
// CHECK3-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double
// CHECK3-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
// CHECK3-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
// CHECK3-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4
// CHECK3-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK3-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
// CHECK3-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK3-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK3-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK3-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK3-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
// CHECK3-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
// CHECK3-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8
// CHECK3-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK3-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8
// CHECK3-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK3-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4
// CHECK3-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK3-NEXT: store i64 [[ADD20]], i64* [[X]], align 4
// CHECK3-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK3-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4
// CHECK3-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
// CHECK3-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
// CHECK3-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
// CHECK3-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK3-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK3: omp.dispatch.inc:
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK3-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK3-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK3: omp.dispatch.end:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK3-NEXT: ret void
//
//
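// NOTE (manually added; not part of the autogenerated assertions): in
// .omp_outlined..7 the schedule kind 33 passed to __kmpc_for_static_init_4 is
// kmp_sch_static_chunked in the runtime's sched_type enum, with the chunk
// size taken from the captured-expression argument. The inner loop is
// therefore wrapped in an omp.dispatch.* loop that advances both bounds by
// the stride after every chunk and re-clamps the upper bound to 25 on each
// pass through omp.dispatch.cond.
//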
// CHECK3-LABEL: define {{[^@]+}}@_Z3bari
// CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK3-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: ret i32 [[TMP8]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK3-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
// CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK3-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK3-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK3-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK3-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2
// CHECK3-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK3-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK3-NEXT: store double* [[A]], double** [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK3-NEXT: store i64 8, i64* [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK3-NEXT: store i64 4, i64* [[TMP20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK3-NEXT: store i32 2, i32* [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK3-NEXT: store i32 2, i32* [[TMP25]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK3-NEXT: store i64 4, i64* [[TMP26]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP27]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4
// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK3-NEXT: store i64 4, i64* [[TMP32]], align 4
// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK3-NEXT: store i8* null, i8** [[TMP33]], align 4
// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK3-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4
// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK3-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4
// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK3-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4
// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
// CHECK3-NEXT: store i8* null, i8** [[TMP39]], align 4
// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK3-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK3-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]]
// CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK3-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32
// CHECK3-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]]
// CHECK3-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK3-NEXT: ret i32 [[ADD3]]
//
//
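// NOTE (manually added; not part of the autogenerated assertions): the
// marshaling in _ZN2S12r1Ei fills parallel .offload_baseptrs/.offload_ptrs/
// .offload_mappers arrays plus a local [5 x i64] sizes array, which is needed
// because entry 4 describes the i16 VLA: its byte size is recomputed as
// (2 * n) * 2 and sign-extended to i64 before the __tgt_target_teams_mapper
// call, while the other entries store the constants 8, 4, 4 and 4. On a
// nonzero return the region falls back to the host entry point, and the `if`
// clause's else branch calls that host version directly.
//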
// CHECK3-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32
// CHECK3-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]]
// CHECK3-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK3-NEXT: ret i32 [[ADD3]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK3-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK3-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK3-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32*
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK3-NEXT: store i8* null, i8** [[TMP26]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK3-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: ret i32 [[TMP31]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK3-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK3-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[A]], align 4
// CHECK3-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK3: omp_if.then:
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK3-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK3-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK3: omp_if.else:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK3-NEXT: br label [[OMP_IF_END]]
// CHECK3: omp_if.end:
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK3-NEXT: ret i32 [[TMP24]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242
// CHECK3-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK3-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK3-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK3-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK3-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK3-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK3-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK3-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4
// CHECK3-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK3-NEXT: store double [[INC]], double* [[A4]], align 4
// CHECK3-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK3-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK3-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1
// CHECK3-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224
// CHECK3-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK3-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK3-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207
// CHECK3-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK3-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK3-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK3-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK3-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK3-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK3-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK3-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK3-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK3-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK3-SAME: () #[[ATTR6]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK3-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: ret i64 0
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK4-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK4-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK4-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4
// CHECK4-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK4-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK4-NEXT: [[A_CASTED3:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED4:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED11:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[A]], align 4
// CHECK4-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK4-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK4-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
// CHECK4-NEXT: br i1 [[TMP6]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103() #[[ATTR4:[0-9]+]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK4-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP7]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i32 [[TMP8]], i64* [[K]]) #[[ATTR4]]
// CHECK4-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP9]], i16* [[CONV]], align 2
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK4-NEXT: store i32 [[TMP11]], i32* [[LIN_CASTED]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP13]], i32* [[A_CASTED2]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32*
// CHECK4-NEXT: store i32 [[TMP10]], i32* [[TMP16]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK4-NEXT: store i32 [[TMP10]], i32* [[TMP18]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4
// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32*
// CHECK4-NEXT: store i32 [[TMP12]], i32* [[TMP21]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK4-NEXT: store i32 [[TMP12]], i32* [[TMP23]], align 4
// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP24]], align 4
// CHECK4-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
// CHECK4-NEXT: store i32 [[TMP14]], i32* [[TMP26]], align 4
// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK4-NEXT: store i32 [[TMP14]], i32* [[TMP28]], align 4
// CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP29]], align 4
// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK4-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: store i16 [[TMP33]], i16* [[TMP32]], align 4
// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK4-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK4-NEXT: store i32 [[TMP35]], i32* [[TMP34]], align 4
// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK4-NEXT: [[TMP37:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP37]], i32* [[TMP36]], align 4
// CHECK4-NEXT: [[TMP38:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK4-NEXT: [[TMP39:%.*]] = bitcast i8* [[TMP38]] to %struct.kmp_task_t_with_privates*
// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 0
// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP40]], i32 0, i32 0
// CHECK4-NEXT: [[TMP42:%.*]] = load i8*, i8** [[TMP41]], align 4
// CHECK4-NEXT: [[TMP43:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i32 12, i1 false)
// CHECK4-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 1
// CHECK4-NEXT: [[TMP45:%.*]] = bitcast i8* [[TMP42]] to %struct.anon*
// CHECK4-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 0
// CHECK4-NEXT: [[TMP47:%.*]] = bitcast [3 x i64]* [[TMP46]] to i8*
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP47]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes to i8*), i32 24, i1 false)
// CHECK4-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 1
// CHECK4-NEXT: [[TMP49:%.*]] = bitcast [3 x i8*]* [[TMP48]] to i8*
// CHECK4-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP30]] to i8*
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP49]], i8* align 4 [[TMP50]], i32 12, i1 false)
// CHECK4-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 2
// CHECK4-NEXT: [[TMP52:%.*]] = bitcast [3 x i8*]* [[TMP51]] to i8*
// CHECK4-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP31]] to i8*
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP52]], i8* align 4 [[TMP53]], i32 12, i1 false)
// CHECK4-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 3
// CHECK4-NEXT: [[TMP55:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: store i16 [[TMP55]], i16* [[TMP54]], align 4
// CHECK4-NEXT: [[TMP56:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP38]])
// CHECK4-NEXT: [[TMP57:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP57]], i32* [[A_CASTED3]], align 4
// CHECK4-NEXT: [[TMP58:%.*]] = load i32, i32* [[A_CASTED3]], align 4
// CHECK4-NEXT: [[TMP59:%.*]] = load i16, i16* [[AA]], align 2
// CHECK4-NEXT: [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
// CHECK4-NEXT: store i16 [[TMP59]], i16* [[CONV5]], align 2
// CHECK4-NEXT: [[TMP60:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
// CHECK4-NEXT: [[TMP61:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP61]], 10
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4: omp_if.then:
// CHECK4-NEXT: [[TMP62:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK4-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32*
// CHECK4-NEXT: store i32 [[TMP58]], i32* [[TMP63]], align 4
// CHECK4-NEXT: [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK4-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32*
// CHECK4-NEXT: store i32 [[TMP58]], i32* [[TMP65]], align 4
// CHECK4-NEXT: [[TMP66:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP66]], align 4
// CHECK4-NEXT: [[TMP67:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
// CHECK4-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32*
// CHECK4-NEXT: store i32 [[TMP60]], i32* [[TMP68]], align 4
// CHECK4-NEXT: [[TMP69:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
// CHECK4-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32*
// CHECK4-NEXT: store i32 [[TMP60]], i32* [[TMP70]], align 4
// CHECK4-NEXT: [[TMP71:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP71]], align 4
// CHECK4-NEXT: [[TMP72:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK4-NEXT: [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK4-NEXT: [[TMP74:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146.region_id, i32 2, i8** [[TMP72]], i8** [[TMP73]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP75:%.*]] = icmp ne i32 [[TMP74]], 0
// CHECK4-NEXT: br i1 [[TMP75]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
// CHECK4: omp_offload.failed9:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i32 [[TMP58]], i32 [[TMP60]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT10]]
// CHECK4: omp_offload.cont10:
// CHECK4-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK4: omp_if.else:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i32 [[TMP58]], i32 [[TMP60]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_IF_END]]
// CHECK4: omp_if.end:
// CHECK4-NEXT: [[TMP76:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP76]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: [[TMP77:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: store i32 [[TMP77]], i32* [[A_CASTED11]], align 4
// CHECK4-NEXT: [[TMP78:%.*]] = load i32, i32* [[A_CASTED11]], align 4
// CHECK4-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: store i32 [[TMP79]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK4-NEXT: [[TMP80:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK4-NEXT: [[TMP81:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP81]], 20
// CHECK4-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]]
// CHECK4: omp_if.then13:
// CHECK4-NEXT: [[TMP82:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK4-NEXT: [[TMP83:%.*]] = sext i32 [[TMP82]] to i64
// CHECK4-NEXT: [[TMP84:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK4-NEXT: [[TMP85:%.*]] = mul nuw i32 [[TMP84]], 8
// CHECK4-NEXT: [[TMP86:%.*]] = sext i32 [[TMP85]] to i64
// CHECK4-NEXT: [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK4-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32*
// CHECK4-NEXT: store i32 [[TMP78]], i32* [[TMP88]], align 4
// CHECK4-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK4-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32*
// CHECK4-NEXT: store i32 [[TMP78]], i32* [[TMP90]], align 4
// CHECK4-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: store i64 4, i64* [[TMP91]], align 4
// CHECK4-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP92]], align 4
// CHECK4-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1
// CHECK4-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to [10 x float]**
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP94]], align 4
// CHECK4-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1
// CHECK4-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]**
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 4
// CHECK4-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK4-NEXT: store i64 40, i64* [[TMP97]], align 4
// CHECK4-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP98]], align 4
// CHECK4-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2
// CHECK4-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP100]], align 4
// CHECK4-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2
// CHECK4-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP102]], align 4
// CHECK4-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK4-NEXT: store i64 4, i64* [[TMP103]], align 4
// CHECK4-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP104]], align 4
// CHECK4-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3
// CHECK4-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to float**
// CHECK4-NEXT: store float* [[VLA]], float** [[TMP106]], align 4
// CHECK4-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3
// CHECK4-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float**
// CHECK4-NEXT: store float* [[VLA]], float** [[TMP108]], align 4
// CHECK4-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK4-NEXT: store i64 [[TMP83]], i64* [[TMP109]], align 4
// CHECK4-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3
// CHECK4-NEXT: store i8* null, i8** [[TMP110]], align 4
// CHECK4-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4
// CHECK4-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]**
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 4
// CHECK4-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4
// CHECK4-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]**
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 4
// CHECK4-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK4-NEXT: store i64 400, i64* [[TMP115]], align 4
// CHECK4-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4
// CHECK4-NEXT: store i8* null, i8** [[TMP116]], align 4
// CHECK4-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5
// CHECK4-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32*
// CHECK4-NEXT: store i32 5, i32* [[TMP118]], align 4
// CHECK4-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5
// CHECK4-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32*
// CHECK4-NEXT: store i32 5, i32* [[TMP120]], align 4
// CHECK4-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK4-NEXT: store i64 4, i64* [[TMP121]], align 4
// CHECK4-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5
// CHECK4-NEXT: store i8* null, i8** [[TMP122]], align 4
// CHECK4-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6
// CHECK4-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP124]], align 4
// CHECK4-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6
// CHECK4-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32*
// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4
// CHECK4-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK4-NEXT: store i64 4, i64* [[TMP127]], align 4
// CHECK4-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6
// CHECK4-NEXT: store i8* null, i8** [[TMP128]], align 4
// CHECK4-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7
// CHECK4-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to double**
// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP130]], align 4
// CHECK4-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7
// CHECK4-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double**
// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP132]], align 4
// CHECK4-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK4-NEXT: store i64 [[TMP86]], i64* [[TMP133]], align 4
// CHECK4-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7
// CHECK4-NEXT: store i8* null, i8** [[TMP134]], align 4
// CHECK4-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8
// CHECK4-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT**
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4
// CHECK4-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8
// CHECK4-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT**
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4
// CHECK4-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK4-NEXT: store i64 12, i64* [[TMP139]], align 4
// CHECK4-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8
// CHECK4-NEXT: store i8* null, i8** [[TMP140]], align 4
// CHECK4-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9
// CHECK4-NEXT: [[TMP142:%.*]] = bitcast i8** [[TMP141]] to i32*
// CHECK4-NEXT: store i32 [[TMP80]], i32* [[TMP142]], align 4
// CHECK4-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9
// CHECK4-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32*
// CHECK4-NEXT: store i32 [[TMP80]], i32* [[TMP144]], align 4
// CHECK4-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
// CHECK4-NEXT: store i64 4, i64* [[TMP145]], align 4
// CHECK4-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9
// CHECK4-NEXT: store i8* null, i8** [[TMP146]], align 4
// CHECK4-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK4-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK4-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP147]], i8** [[TMP148]], i64* [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0
// CHECK4-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK4: omp_offload.failed17:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK4: omp_offload.cont18:
// CHECK4-NEXT: br label [[OMP_IF_END20:%.*]]
// CHECK4: omp_if.else19:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_IF_END20]]
// CHECK4: omp_if.end20:
// CHECK4-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP153]])
// CHECK4-NEXT: ret i32 [[TMP152]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103
// CHECK4-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK4-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK4-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK4: .cancel.exit:
// CHECK4-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK4: .cancel.continue:
// CHECK4-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK4-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK4-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK4: .cancel.exit2:
// CHECK4-NEXT: br label [[CANCEL_EXIT]]
// CHECK4: .cancel.continue3:
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK4-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK4: cancel.cont:
// CHECK4-NEXT: ret void
// CHECK4: cancel.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK4-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK4-SAME: (i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[K1:%.*]] = alloca i64, align 8
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
// CHECK4-NEXT: store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK4-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 35, i32 0, i32 8, i32 1, i32 1)
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0
// CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
// CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK4-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !13
// CHECK4-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !13
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK4-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3
// CHECK4-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]]
// CHECK4-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !13
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !13
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK4-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !13
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK4: omp.dispatch.end:
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK4-NEXT: br i1 [[TMP14]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK4: .omp.linear.pu:
// CHECK4-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8
// CHECK4-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP15]], 27
// CHECK4-NEXT: store i64 [[ADD5]], i64* [[TMP0]], align 8
// CHECK4-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK4: .omp.linear.pu.done:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138
// CHECK4-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
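// Reader's note: [[AA]] arrives as a 16-bit value widened into a 32-bit
// argument slot, so the wrapper bitcasts the i32 temporaries to i16* and
// moves only the low halfword before reloading the whole i32 for the fork
// call; [[LIN]] and [[A]] below are genuine i32 values and are copied
// without the cast.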
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK4-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK4-NEXT: [[LIN2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A3:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK4-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK4-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK4-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK4-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK4-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK4-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK4-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK4-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK4-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK4-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK4-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK4-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK4-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK4-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK4-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK4-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK4-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK4-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK4-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK4-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK4-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4
// CHECK4-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK4-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK4-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK4-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK4-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK4-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK4: .omp.linear.pu:
// CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK4-NEXT: [[CONV16:%.*]] = sext i32 [[TMP20]] to i64
// CHECK4-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK4-NEXT: [[MUL17:%.*]] = mul i64 4, [[TMP21]]
// CHECK4-NEXT: [[ADD18:%.*]] = add i64 [[CONV16]], [[MUL17]]
// CHECK4-NEXT: [[CONV19:%.*]] = trunc i64 [[ADD18]] to i32
// CHECK4-NEXT: store i32 [[CONV19]], i32* [[LIN_ADDR]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK4-NEXT: [[CONV20:%.*]] = sext i32 [[TMP22]] to i64
// CHECK4-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK4-NEXT: [[MUL21:%.*]] = mul i64 4, [[TMP23]]
// CHECK4-NEXT: [[ADD22:%.*]] = add i64 [[CONV20]], [[MUL21]]
// CHECK4-NEXT: [[CONV23:%.*]] = trunc i64 [[ADD22]] to i32
// CHECK4-NEXT: store i32 [[CONV23]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK4: .omp.linear.pu.done:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_task_privates_map.
// CHECK4-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i16** noalias [[TMP1:%.*]], [3 x i8*]** noalias [[TMP2:%.*]], [3 x i8*]** noalias [[TMP3:%.*]], [3 x i64]** noalias [[TMP4:%.*]]) #[[ATTR6:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 4
// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca i16**, align 4
// CHECK4-NEXT: [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 4
// CHECK4-NEXT: [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 4
// CHECK4-NEXT: [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 4
// CHECK4-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 4
// CHECK4-NEXT: store i16** [[TMP1]], i16*** [[DOTADDR1]], align 4
// CHECK4-NEXT: store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 4
// CHECK4-NEXT: store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 4
// CHECK4-NEXT: store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
// CHECK4-NEXT: [[TMP7:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 4
// CHECK4-NEXT: store [3 x i64]* [[TMP6]], [3 x i64]** [[TMP7]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
// CHECK4-NEXT: [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 4
// CHECK4-NEXT: store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
// CHECK4-NEXT: [[TMP11:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 4
// CHECK4-NEXT: store [3 x i8*]* [[TMP10]], [3 x i8*]** [[TMP11]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
// CHECK4-NEXT: [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 4
// CHECK4-NEXT: store i16* [[TMP12]], i16** [[TMP13]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK4-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK4-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK4-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 4
// CHECK4-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 4
// CHECK4-NEXT: [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 4
// CHECK4-NEXT: [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 4
// CHECK4-NEXT: [[AA_CASTED_I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[LIN_CASTED_I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED_I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK4-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK4-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK4-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
// CHECK4-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
// CHECK4-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !25
// CHECK4-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25
// CHECK4-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25
// CHECK4-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !25
// CHECK4-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
// CHECK4-NEXT: call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
// CHECK4-NEXT: [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i32 0, i32 0
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i32 0, i32 0
// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i32 0, i32 0
// CHECK4-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK4-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK4-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK4-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]]
// CHECK4: omp_offload.failed.i:
// CHECK4-NEXT: [[TMP27:%.*]] = load i16, i16* [[TMP16]], align 2
// CHECK4-NEXT: [[CONV_I:%.*]] = bitcast i32* [[AA_CASTED_I]] to i16*
// CHECK4-NEXT: store i16 [[TMP27]], i16* [[CONV_I]], align 2, !noalias !25
// CHECK4-NEXT: [[TMP28:%.*]] = load i32, i32* [[AA_CASTED_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK4-NEXT: store i32 [[TMP29]], i32* [[LIN_CASTED_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP30:%.*]] = load i32, i32* [[LIN_CASTED_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP24]], align 4
// CHECK4-NEXT: store i32 [[TMP31]], i32* [[A_CASTED_I]], align 4, !noalias !25
// CHECK4-NEXT: [[TMP32:%.*]] = load i32, i32* [[A_CASTED_I]], align 4, !noalias !25
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138(i32 [[TMP28]], i32 [[TMP30]], i32 [[TMP32]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]]
// CHECK4: .omp_outlined..3.exit:
// CHECK4-NEXT: ret i32 0
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146
// CHECK4-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK4-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK4-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK4-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK4-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK4-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK4-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK4-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK4-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK4-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170
// CHECK4-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK4-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK4-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK4-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK4: omp.dispatch.cond:
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK4-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK4: omp.dispatch.body:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK4-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK4-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8
// CHECK4-NEXT: store i8 [[CONV]], i8* [[IT]], align 1
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK4-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK4-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK4-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double
// CHECK4-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
// CHECK4-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
// CHECK4-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4
// CHECK4-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK4-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4
// CHECK4-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double
// CHECK4-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
// CHECK4-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
// CHECK4-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4
// CHECK4-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK4-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
// CHECK4-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK4-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK4-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK4-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK4-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
// CHECK4-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
// CHECK4-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8
// CHECK4-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK4-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8
// CHECK4-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK4-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4
// CHECK4-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK4-NEXT: store i64 [[ADD20]], i64* [[X]], align 4
// CHECK4-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK4-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4
// CHECK4-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
// CHECK4-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
// CHECK4-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
// CHECK4-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK4-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK4: omp.dispatch.inc:
// CHECK4-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK4-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK4-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[OMP_DISPATCH_COND]]
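// Reader's note: the loop above is the chunked variant of the static
// schedule: __kmpc_for_static_init_4 is called with schedule kind 33
// (statically chunked), with the chunk size taken from the captured
// expression, so unlike the schedule-kind-34 loops earlier each thread
// re-enters omp.dispatch.cond and advances its lower/upper bounds by the
// stride until its bounds check fails.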
// CHECK4: omp.dispatch.end:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z3bari
// CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[A]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]])
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]])
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]]
// CHECK4-NEXT: store i32 [[ADD2]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]])
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]]
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]])
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]]
// CHECK4-NEXT: store i32 [[ADD6]], i32* [[A]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4
// CHECK4-NEXT: ret i32 [[TMP8]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@_ZN2S12r1Ei
// CHECK4-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4
// CHECK4-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4
// CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1
// CHECK4-NEXT: store i32 [[ADD]], i32* [[B]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave()
// CHECK4-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK4-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK4: omp_if.then:
// CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0
// CHECK4-NEXT: [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]]
// CHECK4-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2
// CHECK4-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64
// CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1**
// CHECK4-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double**
// CHECK4-NEXT: store double* [[A]], double** [[TMP13]], align 4
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: store i64 8, i64* [[TMP14]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP15]], align 4
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32*
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32*
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4
// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK4-NEXT: store i64 4, i64* [[TMP20]], align 4
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK4-NEXT: store i32 2, i32* [[TMP23]], align 4
// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32*
// CHECK4-NEXT: store i32 2, i32* [[TMP25]], align 4
// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK4-NEXT: store i64 4, i64* [[TMP26]], align 4
// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK4-NEXT: store i8* null, i8** [[TMP27]], align 4
// CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK4-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4
// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4
// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK4-NEXT: store i64 4, i64* [[TMP32]], align 4
// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
// CHECK4-NEXT: store i8* null, i8** [[TMP33]], align 4
// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK4-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16**
// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4
// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
// CHECK4-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16**
// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4
// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK4-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4
// CHECK4-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
// CHECK4-NEXT: store i8* null, i8** [[TMP39]], align 4
// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK4-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK4-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0
// CHECK4-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK4: omp_if.else:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]]
// CHECK4-NEXT: br label [[OMP_IF_END]]
// CHECK4: omp_if.end:
// CHECK4-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]]
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]]
// CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
6388 // CHECK4-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 6389 // CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 6390 // CHECK4-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 6391 // CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] 6392 // CHECK4-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 6393 // CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) 6394 // CHECK4-NEXT: ret i32 [[ADD3]] 6395 // 6396 // 6397 // CHECK4-LABEL: define {{[^@]+}}@_ZL7fstatici 6398 // CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 6399 // CHECK4-NEXT: entry: 6400 // CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6401 // CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4 6402 // CHECK4-NEXT: [[AA:%.*]] = alloca i16, align 2 6403 // CHECK4-NEXT: [[AAA:%.*]] = alloca i8, align 1 6404 // CHECK4-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 6405 // CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 6406 // CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 6407 // CHECK4-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 6408 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 6409 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 6410 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 6411 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6412 // CHECK4-NEXT: store i32 0, i32* [[A]], align 4 6413 // CHECK4-NEXT: store i16 0, i16* [[AA]], align 2 6414 // CHECK4-NEXT: store i8 0, i8* [[AAA]], align 1 6415 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 6416 // CHECK4-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 6417 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 6418 // CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 6419 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 6420 // CHECK4-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 6421 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 6422 // CHECK4-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1 6423 // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 6424 // CHECK4-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1 6425 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 6426 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 6427 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 6428 // CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 6429 // CHECK4: omp_if.then: 6430 // CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 6431 // CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* 6432 // CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 6433 // CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 6434 // CHECK4-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32* 6435 // CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4 6436 // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 6437 // CHECK4-NEXT: store i8* null, i8** [[TMP11]], align 4 6438 // CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 6439 // CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 6440 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 6441 // CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], 
i32 0, i32 1 6442 // CHECK4-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 6443 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4 6444 // CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 6445 // CHECK4-NEXT: store i8* null, i8** [[TMP16]], align 4 6446 // CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 6447 // CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* 6448 // CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 6449 // CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 6450 // CHECK4-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 6451 // CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4 6452 // CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 6453 // CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4 6454 // CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 6455 // CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]** 6456 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4 6457 // CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 6458 // CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]** 6459 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4 6460 // CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 6461 // CHECK4-NEXT: store i8* null, i8** [[TMP26]], align 4 6462 // CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 6463 // CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 6464 // CHECK4-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 6465 // CHECK4-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 6466 // CHECK4-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 6467 // CHECK4: omp_offload.failed: 6468 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 6469 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] 6470 // CHECK4: omp_offload.cont: 6471 // CHECK4-NEXT: br label [[OMP_IF_END:%.*]] 6472 // CHECK4: omp_if.else: 6473 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 6474 // CHECK4-NEXT: br label [[OMP_IF_END]] 6475 // CHECK4: omp_if.end: 6476 // CHECK4-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4 6477 // CHECK4-NEXT: ret i32 [[TMP31]] 6478 // 6479 // 6480 // CHECK4-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 6481 // CHECK4-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 6482 // CHECK4-NEXT: entry: 6483 // CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 6484 // CHECK4-NEXT: [[A:%.*]] = alloca i32, align 4 6485 // 
CHECK4-NEXT: [[AA:%.*]] = alloca i16, align 2 6486 // CHECK4-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 6487 // CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 6488 // CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 6489 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 6490 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 6491 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 6492 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 6493 // CHECK4-NEXT: store i32 0, i32* [[A]], align 4 6494 // CHECK4-NEXT: store i16 0, i16* [[AA]], align 2 6495 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 6496 // CHECK4-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 6497 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 6498 // CHECK4-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 6499 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 6500 // CHECK4-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 6501 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 6502 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 6503 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 6504 // CHECK4-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 6505 // CHECK4: omp_if.then: 6506 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 6507 // CHECK4-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32* 6508 // CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4 6509 // CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 6510 // CHECK4-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* 6511 // CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 6512 // CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 6513 // CHECK4-NEXT: store i8* null, i8** [[TMP9]], align 4 6514 // CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 6515 // CHECK4-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32* 6516 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4 6517 // CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 6518 // CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 6519 // CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 6520 // CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 6521 // CHECK4-NEXT: store i8* null, i8** [[TMP14]], align 4 6522 // CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 6523 // CHECK4-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]** 6524 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4 6525 // CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 6526 // CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]** 6527 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4 6528 // CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 6529 // CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4 6530 // CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 
0 6531 // CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 6532 // CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 6533 // CHECK4-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 6534 // CHECK4-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 6535 // CHECK4: omp_offload.failed: 6536 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 6537 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] 6538 // CHECK4: omp_offload.cont: 6539 // CHECK4-NEXT: br label [[OMP_IF_END:%.*]] 6540 // CHECK4: omp_if.else: 6541 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 6542 // CHECK4-NEXT: br label [[OMP_IF_END]] 6543 // CHECK4: omp_if.end: 6544 // CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4 6545 // CHECK4-NEXT: ret i32 [[TMP24]] 6546 // 6547 // 6548 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242 6549 // CHECK4-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { 6550 // CHECK4-NEXT: entry: 6551 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6552 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 6553 // CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 6554 // CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 6555 // CHECK4-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 6556 // CHECK4-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 6557 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6558 // CHECK4-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 6559 // CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 6560 // CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 6561 // CHECK4-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 6562 // CHECK4-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6563 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 6564 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 6565 // CHECK4-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 6566 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 6567 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 6568 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 6569 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) 6570 // CHECK4-NEXT: ret void 6571 // 6572 // 6573 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..9 6574 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { 6575 // CHECK4-NEXT: entry: 6576 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6577 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6578 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 6579 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 6580 // CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 6581 // CHECK4-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 6582 // CHECK4-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 6583 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 6584 // CHECK4-NEXT: [[TMP:%.*]] = alloca i64, align 4 6585 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 6586 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 6587 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 6588 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 6589 // CHECK4-NEXT: [[IT:%.*]] = alloca i64, align 8 6590 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6591 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6592 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 6593 // CHECK4-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 6594 // CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 6595 // CHECK4-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 6596 // CHECK4-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 6597 // CHECK4-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 6598 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 6599 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 6600 // CHECK4-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 6601 // CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 6602 // CHECK4-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 6603 // CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 6604 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 6605 // CHECK4-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 6606 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 6607 // CHECK4-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 6608 // CHECK4-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 6609 // CHECK4-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 6610 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 6611 // CHECK4: cond.true: 6612 // CHECK4-NEXT: br label [[COND_END:%.*]] 6613 // CHECK4: cond.false: 6614 // CHECK4-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 6615 // CHECK4-NEXT: br label [[COND_END]] 6616 // CHECK4: cond.end: 6617 // CHECK4-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 6618 // CHECK4-NEXT: store 
i64 [[COND]], i64* [[DOTOMP_UB]], align 8 6619 // CHECK4-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 6620 // CHECK4-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 6621 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6622 // CHECK4: omp.inner.for.cond: 6623 // CHECK4-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 6624 // CHECK4-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 6625 // CHECK4-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 6626 // CHECK4-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6627 // CHECK4: omp.inner.for.body: 6628 // CHECK4-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 6629 // CHECK4-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 6630 // CHECK4-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 6631 // CHECK4-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 6632 // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4 6633 // CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double 6634 // CHECK4-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 6635 // CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 6636 // CHECK4-NEXT: store double [[ADD]], double* [[A]], align 4 6637 // CHECK4-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 6638 // CHECK4-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4 6639 // CHECK4-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 6640 // CHECK4-NEXT: store double [[INC]], double* [[A4]], align 4 6641 // CHECK4-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 6642 // CHECK4-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]] 6643 // CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]] 6644 // CHECK4-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 6645 // CHECK4-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2 6646 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6647 // CHECK4: omp.body.continue: 6648 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6649 // CHECK4: omp.inner.for.inc: 6650 // CHECK4-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 6651 // CHECK4-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 6652 // CHECK4-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8 6653 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]] 6654 // CHECK4: omp.inner.for.end: 6655 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 6656 // CHECK4: omp.loop.exit: 6657 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 6658 // CHECK4-NEXT: ret void 6659 // 6660 // 6661 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224 6662 // CHECK4-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 6663 // CHECK4-NEXT: entry: 6664 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6665 // CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 6666 // CHECK4-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 6667 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 6668 // CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 6669 // CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 6670 // CHECK4-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 6671 // CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6672 // CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 6673 // CHECK4-NEXT: store i32 [[AAA]], i32* 
[[AAA_ADDR]], align 4 6674 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 6675 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 6676 // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 6677 // CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 6678 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 6679 // CHECK4-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 6680 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 6681 // CHECK4-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 6682 // CHECK4-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 6683 // CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2 6684 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 6685 // CHECK4-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4 6686 // CHECK4-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 6687 // CHECK4-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 6688 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 6689 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) 6690 // CHECK4-NEXT: ret void 6691 // 6692 // 6693 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11 6694 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 6695 // CHECK4-NEXT: entry: 6696 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6697 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6698 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6699 // CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 6700 // CHECK4-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 6701 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 6702 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6703 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 6704 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6705 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6706 // CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6707 // CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 6708 // CHECK4-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 6709 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 6710 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 6711 // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 6712 // CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 6713 // CHECK4-NEXT: ret void 6714 // 6715 // 6716 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207 6717 // CHECK4-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 6718 // CHECK4-NEXT: entry: 6719 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6720 // CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 6721 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 6722 // CHECK4-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 6723 // CHECK4-NEXT: [[AA_CASTED:%.*]] = alloca i32, 
align 4 6724 // CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6725 // CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 6726 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 6727 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 6728 // CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 6729 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 6730 // CHECK4-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 6731 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 6732 // CHECK4-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 6733 // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 6734 // CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 6735 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 6736 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) 6737 // CHECK4-NEXT: ret void 6738 // 6739 // 6740 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..14 6741 // CHECK4-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 6742 // CHECK4-NEXT: entry: 6743 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 6744 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 6745 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 6746 // CHECK4-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 6747 // CHECK4-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 6748 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 6749 // CHECK4-NEXT: [[TMP:%.*]] = alloca i64, align 4 6750 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 6751 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 6752 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 6753 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 6754 // CHECK4-NEXT: [[I:%.*]] = alloca i64, align 8 6755 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 6756 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 6757 // CHECK4-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 6758 // CHECK4-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 6759 // CHECK4-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 6760 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 6761 // CHECK4-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 6762 // CHECK4-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 6763 // CHECK4-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 6764 // CHECK4-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 6765 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 6766 // CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 6767 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 6768 // CHECK4-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 6769 // CHECK4-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 6770 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 
6771 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 6772 // CHECK4: cond.true: 6773 // CHECK4-NEXT: br label [[COND_END:%.*]] 6774 // CHECK4: cond.false: 6775 // CHECK4-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 6776 // CHECK4-NEXT: br label [[COND_END]] 6777 // CHECK4: cond.end: 6778 // CHECK4-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 6779 // CHECK4-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 6780 // CHECK4-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 6781 // CHECK4-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 6782 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6783 // CHECK4: omp.inner.for.cond: 6784 // CHECK4-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 6785 // CHECK4-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 6786 // CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 6787 // CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6788 // CHECK4: omp.inner.for.body: 6789 // CHECK4-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 6790 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 6791 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 6792 // CHECK4-NEXT: store i64 [[ADD]], i64* [[I]], align 8 6793 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4 6794 // CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 6795 // CHECK4-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4 6796 // CHECK4-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4 6797 // CHECK4-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32 6798 // CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 6799 // CHECK4-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 6800 // CHECK4-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4 6801 // CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2 6802 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 6803 // CHECK4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1 6804 // CHECK4-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4 6805 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6806 // CHECK4: omp.body.continue: 6807 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6808 // CHECK4: omp.inner.for.inc: 6809 // CHECK4-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 6810 // CHECK4-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1 6811 // CHECK4-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8 6812 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]] 6813 // CHECK4: omp.inner.for.end: 6814 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 6815 // CHECK4: omp.loop.exit: 6816 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 6817 // CHECK4-NEXT: ret void 6818 // 6819 // 6820 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 6821 // CHECK4-SAME: () #[[ATTR6]] { 6822 // CHECK4-NEXT: entry: 6823 // CHECK4-NEXT: call void @__tgt_register_requires(i64 1) 6824 // CHECK4-NEXT: ret void 6825 // 6826 // 6827 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 6828 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] { 6829 // CHECK9-NEXT: entry: 6830 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. 
to void (i32*, i32*, ...)*)) 6831 // CHECK9-NEXT: ret void 6832 // 6833 // 6834 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined. 6835 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] { 6836 // CHECK9-NEXT: entry: 6837 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6838 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6839 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6840 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 6841 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6842 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6843 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 6844 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 6845 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 6846 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6847 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6848 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6849 // CHECK9-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 6850 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 6851 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 6852 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 6853 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 6854 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 6855 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 6856 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 6857 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 6858 // CHECK9: cond.true: 6859 // CHECK9-NEXT: br label [[COND_END:%.*]] 6860 // CHECK9: cond.false: 6861 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 6862 // CHECK9-NEXT: br label [[COND_END]] 6863 // CHECK9: cond.end: 6864 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 6865 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 6866 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6867 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 6868 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6869 // CHECK9: omp.inner.for.cond: 6870 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 6871 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 6872 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 6873 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6874 // CHECK9: omp.inner.for.body: 6875 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 6876 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 6877 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 6878 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4 6879 // CHECK9-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2) 6880 // CHECK9-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 6881 // CHECK9-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] 6882 // CHECK9: .cancel.exit: 6883 // CHECK9-NEXT: br label [[CANCEL_EXIT:%.*]] 6884 // CHECK9: .cancel.continue: 6885 // CHECK9-NEXT: [[TMP10:%.*]] = call i32 
@__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2) 6886 // CHECK9-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 6887 // CHECK9-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]] 6888 // CHECK9: .cancel.exit2: 6889 // CHECK9-NEXT: br label [[CANCEL_EXIT]] 6890 // CHECK9: .cancel.continue3: 6891 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6892 // CHECK9: omp.body.continue: 6893 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6894 // CHECK9: omp.inner.for.inc: 6895 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 6896 // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 6897 // CHECK9-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4 6898 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] 6899 // CHECK9: omp.inner.for.end: 6900 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 6901 // CHECK9: omp.loop.exit: 6902 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 6903 // CHECK9-NEXT: br label [[CANCEL_CONT:%.*]] 6904 // CHECK9: cancel.cont: 6905 // CHECK9-NEXT: ret void 6906 // CHECK9: cancel.exit: 6907 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 6908 // CHECK9-NEXT: br label [[CANCEL_CONT]] 6909 // 6910 // 6911 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138 6912 // CHECK9-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR0]] { 6913 // CHECK9-NEXT: entry: 6914 // CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 6915 // CHECK9-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 6916 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 6917 // CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 6918 // CHECK9-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 6919 // CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 6920 // CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 6921 // CHECK9-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 6922 // CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 6923 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 6924 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 6925 // CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 6926 // CHECK9-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8 6927 // CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 6928 // CHECK9-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 6929 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 6930 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 6931 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* 6932 // CHECK9-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4 6933 // CHECK9-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 6934 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8 6935 // CHECK9-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32* 6936 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4 6937 // CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8 6938 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) 6939 // CHECK9-NEXT: ret void 6940 // 6941 // 6942 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1 6943 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR1]] { 6944 // CHECK9-NEXT: entry: 6945 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6946 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6947 // CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 6948 // CHECK9-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 6949 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 6950 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 6951 // CHECK9-NEXT: [[TMP:%.*]] = alloca i64, align 8 6952 // CHECK9-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 6953 // CHECK9-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4 6954 // CHECK9-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 6955 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 6956 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 6957 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 6958 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 6959 // CHECK9-NEXT: [[IT:%.*]] = alloca i64, align 8 6960 // CHECK9-NEXT: [[LIN4:%.*]] = alloca i32, align 4 6961 // CHECK9-NEXT: [[A5:%.*]] = alloca i32, align 4 6962 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6963 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6964 // CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 6965 // CHECK9-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 6966 // CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 6967 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 6968 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 6969 // CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 6970 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8 6971 // CHECK9-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 6972 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8 6973 // CHECK9-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4 6974 // CHECK9-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] 6975 // CHECK9-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 6976 // CHECK9-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 6977 // CHECK9-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 6978 // CHECK9-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 6979 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 6980 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 6981 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 6982 // CHECK9-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) 6983 // CHECK9-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 6984 // CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 6985 // CHECK9-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 6986 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 6987 // CHECK9: cond.true: 6988 // 
CHECK9-NEXT: br label [[COND_END:%.*]] 6989 // CHECK9: cond.false: 6990 // CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 6991 // CHECK9-NEXT: br label [[COND_END]] 6992 // CHECK9: cond.end: 6993 // CHECK9-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 6994 // CHECK9-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 6995 // CHECK9-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 6996 // CHECK9-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 6997 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6998 // CHECK9: omp.inner.for.cond: 6999 // CHECK9-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 7000 // CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 7001 // CHECK9-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 7002 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7003 // CHECK9: omp.inner.for.body: 7004 // CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 7005 // CHECK9-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 7006 // CHECK9-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 7007 // CHECK9-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 7008 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 7009 // CHECK9-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64 7010 // CHECK9-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 7011 // CHECK9-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 7012 // CHECK9-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]] 7013 // CHECK9-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]] 7014 // CHECK9-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32 7015 // CHECK9-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4 7016 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4 7017 // CHECK9-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64 7018 // CHECK9-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 7019 // CHECK9-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 7020 // CHECK9-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]] 7021 // CHECK9-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]] 7022 // CHECK9-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32 7023 // CHECK9-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4 7024 // CHECK9-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8 7025 // CHECK9-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32 7026 // CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1 7027 // CHECK9-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16 7028 // CHECK9-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8 7029 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 7030 // CHECK9: omp.body.continue: 7031 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7032 // CHECK9: omp.inner.for.inc: 7033 // CHECK9-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 7034 // CHECK9-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1 7035 // CHECK9-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8 7036 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]] 7037 // CHECK9: omp.inner.for.end: 7038 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 7039 // CHECK9: omp.loop.exit: 7040 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 7041 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 7042 // CHECK9-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 7043 // CHECK9-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label 
[[DOTOMP_LINEAR_PU_DONE:%.*]] 7044 // CHECK9: .omp.linear.pu: 7045 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 7046 // CHECK9-NEXT: [[CONV18:%.*]] = sext i32 [[TMP20]] to i64 7047 // CHECK9-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 7048 // CHECK9-NEXT: [[MUL19:%.*]] = mul i64 4, [[TMP21]] 7049 // CHECK9-NEXT: [[ADD20:%.*]] = add i64 [[CONV18]], [[MUL19]] 7050 // CHECK9-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD20]] to i32 7051 // CHECK9-NEXT: store i32 [[CONV21]], i32* [[CONV1]], align 8 7052 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4 7053 // CHECK9-NEXT: [[CONV22:%.*]] = sext i32 [[TMP22]] to i64 7054 // CHECK9-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 7055 // CHECK9-NEXT: [[MUL23:%.*]] = mul i64 4, [[TMP23]] 7056 // CHECK9-NEXT: [[ADD24:%.*]] = add i64 [[CONV22]], [[MUL23]] 7057 // CHECK9-NEXT: [[CONV25:%.*]] = trunc i64 [[ADD24]] to i32 7058 // CHECK9-NEXT: store i32 [[CONV25]], i32* [[CONV2]], align 8 7059 // CHECK9-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 7060 // CHECK9: .omp.linear.pu.done: 7061 // CHECK9-NEXT: ret void 7062 // 7063 // 7064 // CHECK9-LABEL: define {{[^@]+}}@_Z7get_valv 7065 // CHECK9-SAME: () #[[ATTR3:[0-9]+]] { 7066 // CHECK9-NEXT: entry: 7067 // CHECK9-NEXT: ret i64 0 7068 // 7069 // 7070 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146 7071 // CHECK9-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] { 7072 // CHECK9-NEXT: entry: 7073 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 7074 // CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 7075 // CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 7076 // CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 7077 // CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 7078 // CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 7079 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 7080 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 7081 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8 7082 // CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 7083 // CHECK9-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4 7084 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 7085 // CHECK9-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8 7086 // CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 7087 // CHECK9-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2 7088 // CHECK9-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 7089 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) 7090 // CHECK9-NEXT: ret void 7091 // 7092 // 7093 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..2 7094 // CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR1]] { 7095 // CHECK9-NEXT: entry: 7096 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 7097 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 7098 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 7099 // CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 7100 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7101 // CHECK9-NEXT: [[TMP:%.*]] = alloca i16, align 2 7102 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 7103 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 7104 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 7105 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 7106 // CHECK9-NEXT: [[IT:%.*]] = alloca i16, align 2 7107 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 7108 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 7109 // CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 7110 // CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 7111 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 7112 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 7113 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 7114 // CHECK9-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 7115 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 7116 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 7117 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 7118 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 7119 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 7120 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 7121 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 7122 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 7123 // CHECK9: cond.true: 7124 // CHECK9-NEXT: br label [[COND_END:%.*]] 7125 // CHECK9: cond.false: 7126 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 7127 // CHECK9-NEXT: br label [[COND_END]] 7128 // CHECK9: cond.end: 7129 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 7130 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 7131 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 7132 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 7133 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7134 // CHECK9: omp.inner.for.cond: 7135 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 7136 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 7137 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 7138 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7139 // CHECK9: omp.inner.for.body: 7140 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 7141 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 
[[TMP7]], 4
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK9-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK9-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK9-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8
// CHECK9-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK9-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK9-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK9-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK9-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170
// CHECK9-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK9-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK9-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK9-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK9-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK9-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK9-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK9-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK9-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK9-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK9-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK9-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK9-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK9-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK9-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK9-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK9-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK9-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK9-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK9-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK9-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK9-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK9: omp.dispatch.cond:
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK9: omp.dispatch.body:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK9-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK9-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK9-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1
// CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK9-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK9-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK9-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK9-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK9-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK9-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK9-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK9-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK9-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK9-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK9-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK9-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK9-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK9-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK9-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK9-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK9-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK9-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK9-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK9-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK9-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK9-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK9-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK9-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK9-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8
// CHECK9-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK9-NEXT: store i64 [[ADD22]], i64* [[X]], align 8
// CHECK9-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK9-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8
// CHECK9-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK9-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK9-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK9-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK9-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK9: omp.dispatch.inc:
// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK9-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK9-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK9: omp.dispatch.end:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224
// CHECK9-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK9-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK9-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK9-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK9-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK9-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK9-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK9-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK9-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242
// CHECK9-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK9-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK9-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK9-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK9-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK9-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK9-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK9-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK9-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
// CHECK9-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK9-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK9-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK9-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8
// CHECK9-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK9-NEXT: store double [[INC]], double* [[A5]], align 8
// CHECK9-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16
// CHECK9-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
// CHECK9-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK9-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1
// CHECK9-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207
// CHECK9-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK9-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK9-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK9-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK9-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK9-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK9-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK9-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK9-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK9-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK9-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8
// CHECK9-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK9-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
// CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK9-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK9-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8
// CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK9-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK9-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK9-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103
// CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK10: cond.true:
// CHECK10-NEXT: br label [[COND_END:%.*]]
// CHECK10: cond.false:
// CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: br label [[COND_END]]
// CHECK10: cond.end:
// CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK10: omp.inner.for.cond:
// CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK10: omp.inner.for.body:
// CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK10-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK10-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK10-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK10: .cancel.exit:
// CHECK10-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK10: .cancel.continue:
// CHECK10-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK10-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK10-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK10: .cancel.exit2:
// CHECK10-NEXT: br label [[CANCEL_EXIT]]
// CHECK10: .cancel.continue3:
// CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK10: omp.body.continue:
// CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK10: omp.inner.for.inc:
// CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK10-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK10: omp.inner.for.end:
// CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK10: omp.loop.exit:
// CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK10-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK10: cancel.cont:
// CHECK10-NEXT: ret void
// CHECK10: cancel.exit:
// CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK10-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138
// CHECK10-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK10-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK10-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK10-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK10-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK10-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR1]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[LIN4:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[A5:%.*]] = alloca i32, align 4
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK10-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK10-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
// CHECK10-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK10-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK10-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK10-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK10-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK10-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK10-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK10-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK10: cond.true:
// CHECK10-NEXT: br label [[COND_END:%.*]]
// CHECK10: cond.false:
// CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK10-NEXT: br label [[COND_END]]
// CHECK10: cond.end:
// CHECK10-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK10-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK10-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK10-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK10: omp.inner.for.cond:
// CHECK10-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK10-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK10-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK10-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK10: omp.inner.for.body:
// CHECK10-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK10-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK10-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK10-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK10-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
// CHECK10-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK10-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK10-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK10-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
// CHECK10-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
// CHECK10-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4
// CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4
// CHECK10-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
// CHECK10-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK10-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK10-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK10-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
// CHECK10-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
// CHECK10-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4
// CHECK10-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK10-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
// CHECK10-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
// CHECK10-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
// CHECK10-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8
// CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK10: omp.body.continue:
// CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK10: omp.inner.for.inc:
// CHECK10-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK10-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1
// CHECK10-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK10: omp.inner.for.end:
// CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK10: omp.loop.exit:
// CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK10-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK10-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK10-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK10: .omp.linear.pu:
// CHECK10-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK10-NEXT: [[CONV18:%.*]] = sext i32 [[TMP20]] to i64
// CHECK10-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK10-NEXT: [[MUL19:%.*]] = mul i64 4, [[TMP21]]
// CHECK10-NEXT: [[ADD20:%.*]] = add i64 [[CONV18]], [[MUL19]]
// CHECK10-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD20]] to i32
// CHECK10-NEXT: store i32 [[CONV21]], i32* [[CONV1]], align 8
// CHECK10-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4
// CHECK10-NEXT: [[CONV22:%.*]] = sext i32 [[TMP22]] to i64
// CHECK10-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK10-NEXT: [[MUL23:%.*]] = mul i64 4, [[TMP23]]
// CHECK10-NEXT: [[ADD24:%.*]] = add i64 [[CONV22]], [[MUL23]]
// CHECK10-NEXT: [[CONV25:%.*]] = trunc i64 [[ADD24]] to i32
// CHECK10-NEXT: store i32 [[CONV25]], i32* [[CONV2]], align 8
// CHECK10-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK10: .omp.linear.pu.done:
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK10-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: ret i64 0
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146
// CHECK10-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK10-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK10-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK10-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR1]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK10: cond.true:
// CHECK10-NEXT: br label [[COND_END:%.*]]
// CHECK10: cond.false:
// CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: br label [[COND_END]]
// CHECK10: cond.end:
// CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK10: omp.inner.for.cond:
// CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK10: omp.inner.for.body:
// CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK10-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK10-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2
// CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK10-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8
// CHECK10-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK10-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK10-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK10-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK10-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8
// CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK10: omp.body.continue:
// CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK10: omp.inner.for.inc:
// CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK10-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK10: omp.inner.for.end:
// CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK10: omp.loop.exit:
// CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170
// CHECK10-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK10-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK10-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK10-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK10-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK10-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK10-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK10-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK10-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK10-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK10-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK10-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK10-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK10-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK10-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK10-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK10-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK10-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK10-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK10-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK10-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK10-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK10-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK10-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK10-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK10-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK10-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK10-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK10-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK10: omp.dispatch.cond:
// CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK10: cond.true:
// CHECK10-NEXT: br label [[COND_END:%.*]]
// CHECK10: cond.false:
// CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: br label [[COND_END]]
// CHECK10: cond.end:
// CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK10-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK10: omp.dispatch.body:
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK10: omp.inner.for.cond:
// CHECK10-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK10-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK10: omp.inner.for.body:
// CHECK10-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK10-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK10-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1
// CHECK10-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK10-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK10-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK10-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK10-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK10-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK10-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK10-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK10-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK10-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK10-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK10-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK10-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK10-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK10-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK10-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK10-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK10-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK10-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK10-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK10-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK10-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK10-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK10-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK10-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
//
CHECK10-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8 8141 // CHECK10-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 8142 // CHECK10-NEXT: store i64 [[ADD22]], i64* [[X]], align 8 8143 // CHECK10-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 8144 // CHECK10-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8 8145 // CHECK10-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 8146 // CHECK10-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 8147 // CHECK10-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 8148 // CHECK10-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8 8149 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8150 // CHECK10: omp.body.continue: 8151 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8152 // CHECK10: omp.inner.for.inc: 8153 // CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 8154 // CHECK10-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 8155 // CHECK10-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4 8156 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]] 8157 // CHECK10: omp.inner.for.end: 8158 // CHECK10-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 8159 // CHECK10: omp.dispatch.inc: 8160 // CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 8161 // CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 8162 // CHECK10-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 8163 // CHECK10-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4 8164 // CHECK10-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8165 // CHECK10-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 8166 // CHECK10-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 8167 // CHECK10-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4 8168 // CHECK10-NEXT: br label [[OMP_DISPATCH_COND]] 8169 // CHECK10: omp.dispatch.end: 8170 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 8171 // CHECK10-NEXT: ret void 8172 // 8173 // 8174 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224 8175 // CHECK10-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 8176 // CHECK10-NEXT: entry: 8177 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 8178 // CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 8179 // CHECK10-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 8180 // CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 8181 // CHECK10-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 8182 // CHECK10-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 8183 // CHECK10-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 8184 // CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 8185 // CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 8186 // CHECK10-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 8187 // CHECK10-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 8188 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 8189 // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 8190 // CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 8191 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 8192 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 8193 // CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 8194 // CHECK10-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4 8195 // CHECK10-NEXT: [[TMP2:%.*]] = load 
i64, i64* [[A_CASTED]], align 8 8196 // CHECK10-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 8197 // CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 8198 // CHECK10-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2 8199 // CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 8200 // CHECK10-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8 8201 // CHECK10-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* 8202 // CHECK10-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 8203 // CHECK10-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 8204 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) 8205 // CHECK10-NEXT: ret void 8206 // 8207 // 8208 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..4 8209 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 8210 // CHECK10-NEXT: entry: 8211 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8212 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8213 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 8214 // CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 8215 // CHECK10-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 8216 // CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 8217 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8218 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 8219 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8220 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8221 // CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 8222 // CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 8223 // CHECK10-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 8224 // CHECK10-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 8225 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 8226 // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 8227 // CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 8228 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 8229 // CHECK10-NEXT: ret void 8230 // 8231 // 8232 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242 8233 // CHECK10-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] { 8234 // CHECK10-NEXT: entry: 8235 // CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 8236 // CHECK10-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 8237 // CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 8238 // CHECK10-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 8239 // CHECK10-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 8240 // CHECK10-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 8241 // CHECK10-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 8242 // CHECK10-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 8243 // CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 8244 // CHECK10-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 8245 // 
CHECK10-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 8246 // CHECK10-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 8247 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 8248 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 8249 // CHECK10-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 8250 // CHECK10-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 8251 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8 8252 // CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* 8253 // CHECK10-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 8254 // CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 8255 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) 8256 // CHECK10-NEXT: ret void 8257 // 8258 // 8259 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 8260 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] { 8261 // CHECK10-NEXT: entry: 8262 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8263 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8264 // CHECK10-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 8265 // CHECK10-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 8266 // CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 8267 // CHECK10-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 8268 // CHECK10-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 8269 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 8270 // CHECK10-NEXT: [[TMP:%.*]] = alloca i64, align 8 8271 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 8272 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 8273 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 8274 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8275 // CHECK10-NEXT: [[IT:%.*]] = alloca i64, align 8 8276 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8277 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8278 // CHECK10-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 8279 // CHECK10-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 8280 // CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 8281 // CHECK10-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 8282 // CHECK10-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 8283 // CHECK10-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 8284 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 8285 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 8286 // CHECK10-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 8287 // CHECK10-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 8288 // CHECK10-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 8289 // CHECK10-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 8290 // CHECK10-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 8291 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8292 // CHECK10-NEXT: [[TMP4:%.*]] = load 
i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8293 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 8294 // CHECK10-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 8295 // CHECK10-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 8296 // CHECK10-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 8297 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8298 // CHECK10: cond.true: 8299 // CHECK10-NEXT: br label [[COND_END:%.*]] 8300 // CHECK10: cond.false: 8301 // CHECK10-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 8302 // CHECK10-NEXT: br label [[COND_END]] 8303 // CHECK10: cond.end: 8304 // CHECK10-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 8305 // CHECK10-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 8306 // CHECK10-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 8307 // CHECK10-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 8308 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8309 // CHECK10: omp.inner.for.cond: 8310 // CHECK10-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8311 // CHECK10-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 8312 // CHECK10-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 8313 // CHECK10-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8314 // CHECK10: omp.inner.for.body: 8315 // CHECK10-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8316 // CHECK10-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 8317 // CHECK10-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 8318 // CHECK10-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 8319 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8 8320 // CHECK10-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double 8321 // CHECK10-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 8322 // CHECK10-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 8323 // CHECK10-NEXT: store double [[ADD]], double* [[A]], align 8 8324 // CHECK10-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 8325 // CHECK10-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8 8326 // CHECK10-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 8327 // CHECK10-NEXT: store double [[INC]], double* [[A5]], align 8 8328 // CHECK10-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 8329 // CHECK10-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] 8330 // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]] 8331 // CHECK10-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 8332 // CHECK10-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2 8333 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8334 // CHECK10: omp.body.continue: 8335 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8336 // CHECK10: omp.inner.for.inc: 8337 // CHECK10-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8338 // CHECK10-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1 8339 // CHECK10-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8 8340 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]] 8341 // CHECK10: omp.inner.for.end: 8342 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8343 // CHECK10: omp.loop.exit: 8344 // CHECK10-NEXT: call void 
@__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 8345 // CHECK10-NEXT: ret void 8346 // 8347 // 8348 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207 8349 // CHECK10-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 8350 // CHECK10-NEXT: entry: 8351 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 8352 // CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 8353 // CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 8354 // CHECK10-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 8355 // CHECK10-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 8356 // CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 8357 // CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 8358 // CHECK10-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 8359 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 8360 // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 8361 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 8362 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 8363 // CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 8364 // CHECK10-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4 8365 // CHECK10-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 8366 // CHECK10-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 8367 // CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 8368 // CHECK10-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 8369 // CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 8370 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) 8371 // CHECK10-NEXT: ret void 8372 // 8373 // 8374 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6 8375 // CHECK10-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 8376 // CHECK10-NEXT: entry: 8377 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8378 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8379 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 8380 // CHECK10-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 8381 // CHECK10-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 8382 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 8383 // CHECK10-NEXT: [[TMP:%.*]] = alloca i64, align 8 8384 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 8385 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 8386 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 8387 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8388 // CHECK10-NEXT: [[I:%.*]] = alloca i64, align 8 8389 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8390 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8391 // CHECK10-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 8392 // CHECK10-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 8393 // CHECK10-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 8394 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 
8395 // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 8396 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 8397 // CHECK10-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 8398 // CHECK10-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 8399 // CHECK10-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 8400 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8401 // CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8402 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 8403 // CHECK10-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 8404 // CHECK10-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 8405 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 8406 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8407 // CHECK10: cond.true: 8408 // CHECK10-NEXT: br label [[COND_END:%.*]] 8409 // CHECK10: cond.false: 8410 // CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 8411 // CHECK10-NEXT: br label [[COND_END]] 8412 // CHECK10: cond.end: 8413 // CHECK10-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 8414 // CHECK10-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 8415 // CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 8416 // CHECK10-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 8417 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8418 // CHECK10: omp.inner.for.cond: 8419 // CHECK10-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8420 // CHECK10-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 8421 // CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 8422 // CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8423 // CHECK10: omp.inner.for.body: 8424 // CHECK10-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8425 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 8426 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 8427 // CHECK10-NEXT: store i64 [[ADD]], i64* [[I]], align 8 8428 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8 8429 // CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 8430 // CHECK10-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8 8431 // CHECK10-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8 8432 // CHECK10-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32 8433 // CHECK10-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 8434 // CHECK10-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 8435 // CHECK10-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8 8436 // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2 8437 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 8438 // CHECK10-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1 8439 // CHECK10-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4 8440 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8441 // CHECK10: omp.body.continue: 8442 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8443 // CHECK10: omp.inner.for.inc: 8444 // CHECK10-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8445 // CHECK10-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1 8446 // CHECK10-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8 8447 // CHECK10-NEXT: br label 
[[OMP_INNER_FOR_COND]] 8448 // CHECK10: omp.inner.for.end: 8449 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8450 // CHECK10: omp.loop.exit: 8451 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 8452 // CHECK10-NEXT: ret void 8453 // 8454 // 8455 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 8456 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] { 8457 // CHECK11-NEXT: entry: 8458 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 8459 // CHECK11-NEXT: ret void 8460 // 8461 // 8462 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined. 8463 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] { 8464 // CHECK11-NEXT: entry: 8465 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 8466 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 8467 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8468 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 8469 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 8470 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 8471 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8472 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8473 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 8474 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 8475 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 8476 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 8477 // CHECK11-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 8478 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8479 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8480 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 8481 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 8482 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8483 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8484 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 8485 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8486 // CHECK11: cond.true: 8487 // CHECK11-NEXT: br label [[COND_END:%.*]] 8488 // CHECK11: cond.false: 8489 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8490 // CHECK11-NEXT: br label [[COND_END]] 8491 // CHECK11: cond.end: 8492 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 8493 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 8494 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 8495 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 8496 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8497 // CHECK11: omp.inner.for.cond: 8498 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 8499 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8500 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 8501 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8502 // 
CHECK11: omp.inner.for.body: 8503 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 8504 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 8505 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 8506 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4 8507 // CHECK11-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2) 8508 // CHECK11-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 8509 // CHECK11-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] 8510 // CHECK11: .cancel.exit: 8511 // CHECK11-NEXT: br label [[CANCEL_EXIT:%.*]] 8512 // CHECK11: .cancel.continue: 8513 // CHECK11-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2) 8514 // CHECK11-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 8515 // CHECK11-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]] 8516 // CHECK11: .cancel.exit2: 8517 // CHECK11-NEXT: br label [[CANCEL_EXIT]] 8518 // CHECK11: .cancel.continue3: 8519 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8520 // CHECK11: omp.body.continue: 8521 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8522 // CHECK11: omp.inner.for.inc: 8523 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 8524 // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 8525 // CHECK11-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4 8526 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] 8527 // CHECK11: omp.inner.for.end: 8528 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8529 // CHECK11: omp.loop.exit: 8530 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 8531 // CHECK11-NEXT: br label [[CANCEL_CONT:%.*]] 8532 // CHECK11: cancel.cont: 8533 // CHECK11-NEXT: ret void 8534 // CHECK11: cancel.exit: 8535 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 8536 // CHECK11-NEXT: br label [[CANCEL_CONT]] 8537 // 8538 // 8539 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138 8540 // CHECK11-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR0]] { 8541 // CHECK11-NEXT: entry: 8542 // CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 8543 // CHECK11-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 8544 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 8545 // CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 8546 // CHECK11-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 8547 // CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 8548 // CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 8549 // CHECK11-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 8550 // CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 8551 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 8552 // CHECK11-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4 8553 // CHECK11-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 8554 // CHECK11-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2 8555 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4 8556 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 8557 // CHECK11-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4 8558 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4 8559 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4 8560 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4 8561 // 
CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4 8562 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]]) 8563 // CHECK11-NEXT: ret void 8564 // 8565 // 8566 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1 8567 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR1]] { 8568 // CHECK11-NEXT: entry: 8569 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 8570 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 8571 // CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 8572 // CHECK11-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4 8573 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 8574 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 8575 // CHECK11-NEXT: [[TMP:%.*]] = alloca i64, align 4 8576 // CHECK11-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 8577 // CHECK11-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4 8578 // CHECK11-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 8579 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 8580 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 8581 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 8582 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8583 // CHECK11-NEXT: [[IT:%.*]] = alloca i64, align 8 8584 // CHECK11-NEXT: [[LIN2:%.*]] = alloca i32, align 4 8585 // CHECK11-NEXT: [[A3:%.*]] = alloca i32, align 4 8586 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 8587 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 8588 // CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 8589 // CHECK11-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4 8590 // CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 8591 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 8592 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4 8593 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 8594 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 8595 // CHECK11-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4 8596 // CHECK11-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] 8597 // CHECK11-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 8598 // CHECK11-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 8599 // CHECK11-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 8600 // CHECK11-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 8601 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8602 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 8603 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 8604 // CHECK11-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) 8605 // CHECK11-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 8606 // CHECK11-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 8607 // CHECK11-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 8608 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], 
label [[COND_FALSE:%.*]] 8609 // CHECK11: cond.true: 8610 // CHECK11-NEXT: br label [[COND_END:%.*]] 8611 // CHECK11: cond.false: 8612 // CHECK11-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 8613 // CHECK11-NEXT: br label [[COND_END]] 8614 // CHECK11: cond.end: 8615 // CHECK11-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 8616 // CHECK11-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 8617 // CHECK11-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 8618 // CHECK11-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 8619 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8620 // CHECK11: omp.inner.for.cond: 8621 // CHECK11-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8622 // CHECK11-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 8623 // CHECK11-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 8624 // CHECK11-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8625 // CHECK11: omp.inner.for.body: 8626 // CHECK11-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8627 // CHECK11-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 8628 // CHECK11-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 8629 // CHECK11-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 8630 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 8631 // CHECK11-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64 8632 // CHECK11-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8633 // CHECK11-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 8634 // CHECK11-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]] 8635 // CHECK11-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]] 8636 // CHECK11-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32 8637 // CHECK11-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4 8638 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4 8639 // CHECK11-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64 8640 // CHECK11-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8641 // CHECK11-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 8642 // CHECK11-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]] 8643 // CHECK11-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]] 8644 // CHECK11-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32 8645 // CHECK11-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4 8646 // CHECK11-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4 8647 // CHECK11-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32 8648 // CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 8649 // CHECK11-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16 8650 // CHECK11-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4 8651 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8652 // CHECK11: omp.body.continue: 8653 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8654 // CHECK11: omp.inner.for.inc: 8655 // CHECK11-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 8656 // CHECK11-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1 8657 // CHECK11-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8 8658 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] 8659 // CHECK11: omp.inner.for.end: 8660 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8661 // CHECK11: omp.loop.exit: 8662 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 8663 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8664 // CHECK11-NEXT: [[TMP19:%.*]] = icmp ne i32 
[[TMP18]], 0 8665 // CHECK11-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 8666 // CHECK11: .omp.linear.pu: 8667 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 8668 // CHECK11-NEXT: [[CONV16:%.*]] = sext i32 [[TMP20]] to i64 8669 // CHECK11-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 8670 // CHECK11-NEXT: [[MUL17:%.*]] = mul i64 4, [[TMP21]] 8671 // CHECK11-NEXT: [[ADD18:%.*]] = add i64 [[CONV16]], [[MUL17]] 8672 // CHECK11-NEXT: [[CONV19:%.*]] = trunc i64 [[ADD18]] to i32 8673 // CHECK11-NEXT: store i32 [[CONV19]], i32* [[LIN_ADDR]], align 4 8674 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4 8675 // CHECK11-NEXT: [[CONV20:%.*]] = sext i32 [[TMP22]] to i64 8676 // CHECK11-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 8677 // CHECK11-NEXT: [[MUL21:%.*]] = mul i64 4, [[TMP23]] 8678 // CHECK11-NEXT: [[ADD22:%.*]] = add i64 [[CONV20]], [[MUL21]] 8679 // CHECK11-NEXT: [[CONV23:%.*]] = trunc i64 [[ADD22]] to i32 8680 // CHECK11-NEXT: store i32 [[CONV23]], i32* [[A_ADDR]], align 4 8681 // CHECK11-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 8682 // CHECK11: .omp.linear.pu.done: 8683 // CHECK11-NEXT: ret void 8684 // 8685 // 8686 // CHECK11-LABEL: define {{[^@]+}}@_Z7get_valv 8687 // CHECK11-SAME: () #[[ATTR3:[0-9]+]] { 8688 // CHECK11-NEXT: entry: 8689 // CHECK11-NEXT: ret i64 0 8690 // 8691 // 8692 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146 8693 // CHECK11-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] { 8694 // CHECK11-NEXT: entry: 8695 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 8696 // CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 8697 // CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 8698 // CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 8699 // CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 8700 // CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 8701 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 8702 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 8703 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 8704 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 8705 // CHECK11-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4 8706 // CHECK11-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 8707 // CHECK11-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 8708 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 8709 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]]) 8710 // CHECK11-NEXT: ret void 8711 // 8712 // 8713 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..2 8714 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR1]] { 8715 // CHECK11-NEXT: entry: 8716 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 8717 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 8718 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 8719 // CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 8720 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8721 // CHECK11-NEXT: [[TMP:%.*]] = alloca i16, align 2 8722 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 8723 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 8724 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8725 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8726 // CHECK11-NEXT: [[IT:%.*]] = alloca i16, align 2 8727 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 8728 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 8729 // CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 8730 // CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 8731 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 8732 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 8733 // CHECK11-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 8734 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8735 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8736 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 8737 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 8738 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8739 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8740 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 8741 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8742 // CHECK11: cond.true: 8743 // CHECK11-NEXT: br label [[COND_END:%.*]] 8744 // CHECK11: cond.false: 8745 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8746 // CHECK11-NEXT: br label [[COND_END]] 8747 // CHECK11: cond.end: 8748 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 8749 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 8750 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 8751 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 8752 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8753 // CHECK11: omp.inner.for.cond: 8754 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 8755 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8756 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 8757 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8758 // CHECK11: omp.inner.for.body: 8759 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 8760 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 8761 // 
CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 8762 // CHECK11-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16 8763 // CHECK11-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2 8764 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 8765 // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 8766 // CHECK11-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4 8767 // CHECK11-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4 8768 // CHECK11-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32 8769 // CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 8770 // CHECK11-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 8771 // CHECK11-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4 8772 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8773 // CHECK11: omp.body.continue: 8774 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8775 // CHECK11: omp.inner.for.inc: 8776 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 8777 // CHECK11-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1 8778 // CHECK11-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4 8779 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]] 8780 // CHECK11: omp.inner.for.end: 8781 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8782 // CHECK11: omp.loop.exit: 8783 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 8784 // CHECK11-NEXT: ret void 8785 // 8786 // 8787 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170 8788 // CHECK11-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 8789 // CHECK11-NEXT: entry: 8790 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 8791 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 8792 // CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 8793 // CHECK11-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 8794 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 8795 // CHECK11-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 8796 // CHECK11-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 8797 // CHECK11-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 8798 // CHECK11-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 8799 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 8800 // CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 8801 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 8802 // CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 8803 // CHECK11-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 8804 // CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 8805 // CHECK11-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 8806 // CHECK11-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 8807 // CHECK11-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 8808 // CHECK11-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 8809 // CHECK11-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 8810 // CHECK11-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 8811 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 8812 // 
CHECK11-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 8813 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 8814 // CHECK11-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 8815 // CHECK11-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 8816 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 8817 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 8818 // CHECK11-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 8819 // CHECK11-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 8820 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 8821 // CHECK11-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4 8822 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4 8823 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 8824 // CHECK11-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 8825 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 8826 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]]) 8827 // CHECK11-NEXT: ret void 8828 // 8829 // 8830 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3 8831 // CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 8832 // CHECK11-NEXT: entry: 8833 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 8834 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 8835 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 8836 // CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 8837 // CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 8838 // CHECK11-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 8839 // CHECK11-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 8840 // CHECK11-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 8841 // CHECK11-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 8842 // CHECK11-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 8843 // CHECK11-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 8844 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 8845 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8846 // CHECK11-NEXT: [[TMP:%.*]] = alloca i8, align 1 8847 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 8848 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 8849 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8850 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8851 // CHECK11-NEXT: [[IT:%.*]] = alloca i8, align 1 8852 // CHECK11-NEXT: store 
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK11-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK11-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK11-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK11-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK11-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK11-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK11-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK11-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK11-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK11-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK11: omp.dispatch.cond:
// CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK11: cond.true:
// CHECK11-NEXT: br label [[COND_END:%.*]]
// CHECK11: cond.false:
// CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK11-NEXT: br label [[COND_END]]
// CHECK11: cond.end:
// CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK11-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK11: omp.dispatch.body:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK11: omp.inner.for.cond:
// CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK11-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK11: omp.inner.for.body:
// CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK11-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8
// CHECK11-NEXT: store i8 [[CONV]], i8* [[IT]], align 1
// CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK11-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK11-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK11-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double
// CHECK11-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
// CHECK11-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
// CHECK11-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4
// CHECK11-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK11-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4
// CHECK11-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double
// CHECK11-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
// CHECK11-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
// CHECK11-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4
// CHECK11-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK11-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
// CHECK11-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK11-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK11-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK11-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK11-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
// CHECK11-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
// CHECK11-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8
// CHECK11-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK11-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8
// CHECK11-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK11-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4
// CHECK11-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK11-NEXT: store i64 [[ADD20]], i64* [[X]], align 4
// CHECK11-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK11-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4
// CHECK11-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
// CHECK11-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
// CHECK11-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
// CHECK11-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK11: omp.body.continue:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK11: omp.inner.for.inc:
// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK11-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK11-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK11: omp.inner.for.end:
// CHECK11-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK11: omp.dispatch.inc:
// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK11-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK11-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
// CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK11-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK11-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
// CHECK11-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK11: omp.dispatch.end:
// CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224
// CHECK11-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK11-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK11-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK11-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK11-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK11-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK11-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK11-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242
// CHECK11-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK11-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK11-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK11-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK11-NEXT: ret void
//
//
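// .omp_outlined..5 iterates over an unsigned 64-bit space, so the 8-byte unsigned
// flavor of the worksharing entry point is emitted (__kmpc_for_static_init_8u),
// together with unsigned bound comparisons (icmp ugt / icmp ule) rather than the
// signed _4 variant and sgt/sle used by the loops above.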
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK11-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK11-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK11-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK11-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK11-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK11-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK11-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK11: cond.true:
// CHECK11-NEXT: br label [[COND_END:%.*]]
// CHECK11: cond.false:
// CHECK11-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: br label [[COND_END]]
// CHECK11: cond.end:
// CHECK11-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK11-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK11-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK11: omp.inner.for.cond:
// CHECK11-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK11-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK11: omp.inner.for.body:
// CHECK11-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK11-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK11-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK11-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK11-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK11-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK11-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK11-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4
// CHECK11-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK11-NEXT: store double [[INC]], double* [[A4]], align 4
// CHECK11-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK11-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
// CHECK11-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK11-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK11: omp.body.continue:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK11: omp.inner.for.inc:
// CHECK11-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1
// CHECK11-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK11: omp.inner.for.end:
// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK11: omp.loop.exit:
// CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207
// CHECK11-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK11-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK11-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK11-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK11-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK11-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK11-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK11-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK11-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK11-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK11-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK11-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK11-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK11: cond.true:
// CHECK11-NEXT: br label [[COND_END:%.*]]
// CHECK11: cond.false:
// CHECK11-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: br label [[COND_END]]
// CHECK11: cond.end:
// CHECK11-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK11-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK11-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK11: omp.inner.for.cond:
// CHECK11-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK11: omp.inner.for.body:
// CHECK11-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK11-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK11-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4
// CHECK11-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK11-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK11-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK11-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4
// CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK11-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK11: omp.body.continue:
// CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK11: omp.inner.for.inc:
// CHECK11-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK11-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK11: omp.inner.for.end:
// CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK11: omp.loop.exit:
// CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK11-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103
// CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK12-NEXT: ret void
//
//
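// The outlined body below belongs to the cancellable loop at source line 103:
// __kmpc_cancel and __kmpc_cancellationpoint (the trailing i32 2 should be the
// worksharing-loop construct kind) return nonzero once cancellation is active, and
// the .cancel.exit paths still run __kmpc_for_static_fini before rejoining the
// normal exit at cancel.cont.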
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK12-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK12-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK12-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK12: .cancel.exit:
// CHECK12-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK12: .cancel.continue:
// CHECK12-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK12-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK12-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK12: .cancel.exit2:
// CHECK12-NEXT: br label [[CANCEL_EXIT]]
// CHECK12: .cancel.continue3:
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK12-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK12-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK12: cancel.cont:
// CHECK12-NEXT: ret void
// CHECK12: cancel.exit:
// CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK12-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138
// CHECK12-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK12-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK12-NEXT: ret void
//
//
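// .omp_outlined..1 carries the lowering of a linear clause whose step apparently comes
// from _Z7get_valv(): the start values are captured into [[DOTLINEAR_START]] and
// [[DOTLINEAR_START1]], the __kmpc_barrier keeps threads from racing on those shared
// variables, and the thread that ran the last chunk ([[DOTOMP_IS_LAST]] != 0) writes
// start + 4 * step (4 being this loop's trip count) back in .omp.linear.pu.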
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR1]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK12-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[LIN2:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[A3:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK12-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK12-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK12-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK12-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK12-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK12-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK12-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK12-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK12-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK12-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK12-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK12-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK12-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK12-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK12-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4
// CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK12-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK12-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK12-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK12-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK12-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK12-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4
// CHECK12-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK12-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK12-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK12-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK12-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK12-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK12-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK12: .omp.linear.pu:
// CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK12-NEXT: [[CONV16:%.*]] = sext i32 [[TMP20]] to i64
// CHECK12-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK12-NEXT: [[MUL17:%.*]] = mul i64 4, [[TMP21]]
// CHECK12-NEXT: [[ADD18:%.*]] = add i64 [[CONV16]], [[MUL17]]
// CHECK12-NEXT: [[CONV19:%.*]] = trunc i64 [[ADD18]] to i32
// CHECK12-NEXT: store i32 [[CONV19]], i32* [[LIN_ADDR]], align 4
// CHECK12-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK12-NEXT: [[CONV20:%.*]] = sext i32 [[TMP22]] to i64
// CHECK12-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK12-NEXT: [[MUL21:%.*]] = mul i64 4, [[TMP23]]
// CHECK12-NEXT: [[ADD22:%.*]] = add i64 [[CONV20]], [[MUL21]]
// CHECK12-NEXT: [[CONV23:%.*]] = trunc i64 [[ADD22]] to i32
// CHECK12-NEXT: store i32 [[CONV23]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK12: .omp.linear.pu.done:
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK12-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: ret i64 0
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146
// CHECK12-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK12-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR1]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK12-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK12-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2
// CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK12-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK12-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK12-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK12-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK12-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK12-NEXT: ret void
//
//
CHECK12-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 9603 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 9604 // CHECK12-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 9605 // CHECK12-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 9606 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 9607 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 9608 // CHECK12-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 9609 // CHECK12-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 9610 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 9611 // CHECK12-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4 9612 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4 9613 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 9614 // CHECK12-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 9615 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 9616 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]]) 9617 // CHECK12-NEXT: ret void 9618 // 9619 // 9620 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3 9621 // CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 9622 // CHECK12-NEXT: entry: 9623 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 9624 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 9625 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 9626 // CHECK12-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 9627 // CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 9628 // CHECK12-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 9629 // CHECK12-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 9630 // CHECK12-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 9631 // CHECK12-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 9632 // CHECK12-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 9633 // CHECK12-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 9634 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 9635 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9636 // CHECK12-NEXT: [[TMP:%.*]] = alloca i8, align 1 9637 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 9638 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 9639 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9640 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9641 // CHECK12-NEXT: [[IT:%.*]] = alloca i8, align 1 9642 // CHECK12-NEXT: store 
i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 9643 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 9644 // CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 9645 // CHECK12-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 9646 // CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 9647 // CHECK12-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 9648 // CHECK12-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 9649 // CHECK12-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 9650 // CHECK12-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 9651 // CHECK12-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 9652 // CHECK12-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 9653 // CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 9654 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 9655 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 9656 // CHECK12-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 9657 // CHECK12-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 9658 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 9659 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 9660 // CHECK12-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 9661 // CHECK12-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 9662 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 9663 // CHECK12-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 9664 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 9665 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9666 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 9667 // CHECK12-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 9668 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 9669 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 9670 // CHECK12-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 9671 // CHECK12: omp.dispatch.cond: 9672 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9673 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 9674 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9675 // CHECK12: cond.true: 9676 // CHECK12-NEXT: br label [[COND_END:%.*]] 9677 // CHECK12: cond.false: 9678 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9679 // CHECK12-NEXT: br label [[COND_END]] 9680 // CHECK12: cond.end: 9681 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 9682 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 9683 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 9684 // CHECK12-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 9685 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 9686 // CHECK12-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9687 // CHECK12-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 9688 // CHECK12-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 9689 // CHECK12: 
omp.dispatch.body: 9690 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9691 // CHECK12: omp.inner.for.cond: 9692 // CHECK12-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 9693 // CHECK12-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9694 // CHECK12-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 9695 // CHECK12-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9696 // CHECK12: omp.inner.for.body: 9697 // CHECK12-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 9698 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 9699 // CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 9700 // CHECK12-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 9701 // CHECK12-NEXT: store i8 [[CONV]], i8* [[IT]], align 1 9702 // CHECK12-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4 9703 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 9704 // CHECK12-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4 9705 // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2 9706 // CHECK12-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4 9707 // CHECK12-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double 9708 // CHECK12-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 9709 // CHECK12-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float 9710 // CHECK12-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4 9711 // CHECK12-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 9712 // CHECK12-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4 9713 // CHECK12-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double 9714 // CHECK12-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 9715 // CHECK12-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float 9716 // CHECK12-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4 9717 // CHECK12-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 9718 // CHECK12-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 9719 // CHECK12-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8 9720 // CHECK12-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 9721 // CHECK12-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8 9722 // CHECK12-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] 9723 // CHECK12-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] 9724 // CHECK12-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 9725 // CHECK12-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8 9726 // CHECK12-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 9727 // CHECK12-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8 9728 // CHECK12-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 9729 // CHECK12-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4 9730 // CHECK12-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 9731 // CHECK12-NEXT: store i64 [[ADD20]], i64* [[X]], align 4 9732 // CHECK12-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 9733 // CHECK12-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4 9734 // CHECK12-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 9735 // CHECK12-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 
// CHECK12-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
// CHECK12-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK12-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK12: omp.dispatch.inc:
// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK12-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK12-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
// CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK12-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK12-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
// CHECK12-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK12: omp.dispatch.end:
// CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224
// CHECK12-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK12-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK12-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK12-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK12-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK12-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK12-NEXT: ret void
//
//
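// The next function, @.omp_outlined..4, is the outlined parallel region behind
// the fstatic entry above; the checks expect only the argument spills for a,
// aa, aaa, and b before ret, with no surviving loop body.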
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK12-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242
// CHECK12-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK12-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK12-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK12-NEXT: ret void
//
//
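// @.omp_outlined..5 carries the loop of S1::r1's target region (line 242): an
// unsigned 64-bit statically scheduled worksharing loop (schedule constant 34
// via __kmpc_for_static_init_8u, four iterations, it = 2000 stepping down by
// 400) that updates this->a and one element of the i16 VLA c.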
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK12-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK12-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK12-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK12-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK12-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK12-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK12-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK12-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK12-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK12-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK12-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4
// CHECK12-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK12-NEXT: store double [[INC]], double* [[A4]], align 4
// CHECK12-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK12-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
// CHECK12-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK12-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1
// CHECK12-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK12-NEXT: ret void
//
//
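// The target entry point for ftemplate<int> (line 207) repacks a and aa into
// "casted" temporaries and forwards them, together with b, to
// @.omp_outlined..6 through __kmpc_fork_call.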
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207
// CHECK12-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK12-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK12-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK12-NEXT: ret void
//
//
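// @.omp_outlined..6 runs ftemplate's loop: seven iterations (i = -10, -7,
// ..., 8 over IV 0..6, via __kmpc_for_static_init_8) incrementing a, aa, and
// b[2].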
// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK12-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK12-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK12-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK12-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK12-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK12-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK12-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK12-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK12-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK12-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK12-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4
// CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK12-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK12-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK12-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK12-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK17-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: ret i64 0
//
//
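// From here on the CHECK17 prefix covers a 64-bit host lowering of foo():
// each target region below either launches through __tgt_target_teams_mapper
// or falls back to a direct call of its offload stub.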
// CHECK17-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK17-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK17-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK17-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK17-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK17-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK17-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_CASTED4:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK17-NEXT: [[A_CASTED6:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED8:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A_CASTED15:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK17-NEXT: store i32 0, i32* [[A]], align 4
// CHECK17-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK17-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK17-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK17-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK17-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK17-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK17-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0)
// CHECK17-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK17-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK17: omp_offload.failed:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103() #[[ATTR4:[0-9]+]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK17: omp_offload.cont:
// CHECK17-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK17-NEXT: store i64 [[CALL]], i64* [[K]], align 8
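// For the line-110 region, a and k are coerced by value through i64 "casted"
// temporaries and the offload stub is invoked directly, with no __tgt_*
// launch guarding it.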
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP9]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP10:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP11:%.*]] = load i64, i64* [[K]], align 8
// CHECK17-NEXT: store i64 [[TMP11]], i64* [[K_CASTED]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i64 [[TMP10]], i64 [[TMP12]]) #[[ATTR4]]
// CHECK17-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i16, i16* [[AA]], align 2
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP13]], i16* [[CONV2]], align 2
// CHECK17-NEXT: [[TMP14:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP15]], i32* [[CONV3]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32*
// CHECK17-NEXT: store i32 [[TMP17]], i32* [[CONV5]], align 4
// CHECK17-NEXT: [[TMP18:%.*]] = load i64, i64* [[A_CASTED4]], align 8
// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK17-NEXT: store i64 [[TMP14]], i64* [[TMP20]], align 8
// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK17-NEXT: store i64 [[TMP14]], i64* [[TMP22]], align 8
// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP23]], align 8
// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
// CHECK17-NEXT: store i64 [[TMP16]], i64* [[TMP25]], align 8
// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK17-NEXT: store i64 [[TMP16]], i64* [[TMP27]], align 8
// CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP28]], align 8
// CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
// CHECK17-NEXT: store i64 [[TMP18]], i64* [[TMP30]], align 8
// CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK17-NEXT: store i64 [[TMP18]], i64* [[TMP32]], align 8
// CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK17-NEXT: [[TMP37:%.*]] = load i16, i16* [[AA]], align 2
// CHECK17-NEXT: store i16 [[TMP37]], i16* [[TMP36]], align 4
// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK17-NEXT: [[TMP39:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK17-NEXT: store i32 [[TMP39]], i32* [[TMP38]], align 4
// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK17-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: store i32 [[TMP41]], i32* [[TMP40]], align 4
// CHECK17-NEXT: [[TMP42:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK17-NEXT: [[TMP43:%.*]] = bitcast i8* [[TMP42]] to %struct.kmp_task_t_with_privates*
// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP43]], i32 0, i32 0
// CHECK17-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP44]], i32 0, i32 0
// CHECK17-NEXT: [[TMP46:%.*]] = load i8*, i8** [[TMP45]], align 8
// CHECK17-NEXT: [[TMP47:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK17-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP46]], i8* align 4 [[TMP47]], i64 12, i1 false)
// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP43]], i32 0, i32 1
// CHECK17-NEXT: [[TMP49:%.*]] = bitcast i8* [[TMP46]] to %struct.anon*
// CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 0
// CHECK17-NEXT: [[TMP51:%.*]] = bitcast [3 x i8*]* [[TMP50]] to i8*
// CHECK17-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP34]] to i8*
// CHECK17-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP51]], i8* align 8 [[TMP52]], i64 24, i1 false)
// CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 1
// CHECK17-NEXT: [[TMP54:%.*]] = bitcast [3 x i8*]* [[TMP53]] to i8*
// CHECK17-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP35]] to i8*
// CHECK17-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP54]], i8* align 8 [[TMP55]], i64 24, i1 false)
// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 2
// CHECK17-NEXT: [[TMP57:%.*]] = bitcast [3 x i64]* [[TMP56]] to i8*
// CHECK17-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP57]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes to i8*), i64 24, i1 false)
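// Everything the deferred launch needs is copied into the task's privates:
// the three offload arrays above (24 bytes each), the constant
// @.offload_sizes, and, in the next lines, the firstprivate aa by value.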
// CHECK17-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 3
// CHECK17-NEXT: [[TMP59:%.*]] = load i16, i16* [[AA]], align 2
// CHECK17-NEXT: store i16 [[TMP59]], i16* [[TMP58]], align 8
// CHECK17-NEXT: [[TMP60:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP42]])
// CHECK17-NEXT: [[TMP61:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32*
// CHECK17-NEXT: store i32 [[TMP61]], i32* [[CONV7]], align 4
// CHECK17-NEXT: [[TMP62:%.*]] = load i64, i64* [[A_CASTED6]], align 8
// CHECK17-NEXT: [[TMP63:%.*]] = load i16, i16* [[AA]], align 2
// CHECK17-NEXT: [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16*
// CHECK17-NEXT: store i16 [[TMP63]], i16* [[CONV9]], align 2
// CHECK17-NEXT: [[TMP64:%.*]] = load i64, i64* [[AA_CASTED8]], align 8
// CHECK17-NEXT: [[TMP65:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP65]], 10
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK17: omp_if.then:
// CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK17-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64*
// CHECK17-NEXT: store i64 [[TMP62]], i64* [[TMP67]], align 8
// CHECK17-NEXT: [[TMP68:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK17-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64*
// CHECK17-NEXT: store i64 [[TMP62]], i64* [[TMP69]], align 8
// CHECK17-NEXT: [[TMP70:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP70]], align 8
// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1
// CHECK17-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
// CHECK17-NEXT: store i64 [[TMP64]], i64* [[TMP72]], align 8
// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1
// CHECK17-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
// CHECK17-NEXT: store i64 [[TMP64]], i64* [[TMP74]], align 8
// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP75]], align 8
// CHECK17-NEXT: [[TMP76:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK17-NEXT: [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK17-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146.region_id, i32 2, i8** [[TMP76]], i8** [[TMP77]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK17-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
// CHECK17-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
// CHECK17: omp_offload.failed13:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i64 [[TMP62]], i64 [[TMP64]]) #[[ATTR4]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT14]]
// CHECK17: omp_offload.cont14:
// CHECK17-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK17: omp_if.else:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i64 [[TMP62]], i64 [[TMP64]]) #[[ATTR4]]
// CHECK17-NEXT: br label [[OMP_IF_END]]
// CHECK17: omp_if.end:
// CHECK17-NEXT: [[TMP80:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: store i32 [[TMP80]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[TMP81:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32*
// CHECK17-NEXT: store i32 [[TMP81]], i32* [[CONV16]], align 4
// CHECK17-NEXT: [[TMP82:%.*]] = load i64, i64* [[A_CASTED15]], align 8
// CHECK17-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK17-NEXT: [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP83]], i32* [[CONV17]], align 4
// CHECK17-NEXT: [[TMP84:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: [[TMP85:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP85]], 20
// CHECK17-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]]
// CHECK17: omp_if.then19:
// CHECK17-NEXT: [[TMP86:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK17-NEXT: [[TMP87:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK17-NEXT: [[TMP88:%.*]] = mul nuw i64 [[TMP87]], 8
// CHECK17-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK17-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
// CHECK17-NEXT: store i64 [[TMP82]], i64* [[TMP90]], align 8
// CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK17-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64*
// CHECK17-NEXT: store i64 [[TMP82]], i64* [[TMP92]], align 8
// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK17-NEXT: store i64 4, i64* [[TMP93]], align 8
// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP94]], align 8
// CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
// CHECK17-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]**
// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 8
// CHECK17-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
// CHECK17-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to [10 x float]**
// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP98]], align 8
// CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK17-NEXT: store i64 40, i64* [[TMP99]], align 8
// CHECK17-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP100]], align 8
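// Entries 2 through 9 of the 10-slot argument arrays follow the same pattern
// for the VLA bounds, the two VLAs themselves (byte sizes [[TMP86]] and
// [[TMP88]] computed at run time), the aggregates c and d, and the captured
// scalar: matching base/pointer stores plus a per-entry size in
// DOTOFFLOAD_SIZES and a null mapper.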
// CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
// CHECK17-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i64*
// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP102]], align 8
// CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
// CHECK17-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i64*
// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP104]], align 8
// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK17-NEXT: store i64 8, i64* [[TMP105]], align 8
// CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP106]], align 8
// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
// CHECK17-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float**
// CHECK17-NEXT: store float* [[VLA]], float** [[TMP108]], align 8
// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
// CHECK17-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to float**
// CHECK17-NEXT: store float* [[VLA]], float** [[TMP110]], align 8
// CHECK17-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK17-NEXT: store i64 [[TMP86]], i64* [[TMP111]], align 8
// CHECK17-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
// CHECK17-NEXT: store i8* null, i8** [[TMP112]], align 8
// CHECK17-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
// CHECK17-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]**
// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 8
// CHECK17-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
// CHECK17-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]**
// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8
// CHECK17-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK17-NEXT: store i64 400, i64* [[TMP117]], align 8
// CHECK17-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
// CHECK17-NEXT: store i8* null, i8** [[TMP118]], align 8
// CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5
// CHECK17-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64*
// CHECK17-NEXT: store i64 5, i64* [[TMP120]], align 8
// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5
// CHECK17-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64*
// CHECK17-NEXT: store i64 5, i64* [[TMP122]], align 8
// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK17-NEXT: store i64 8, i64* [[TMP123]], align 8
// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5
// CHECK17-NEXT: store i8* null, i8** [[TMP124]], align 8
// CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6
// CHECK17-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64*
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8
// CHECK17-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6
// CHECK17-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64*
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8
// CHECK17-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK17-NEXT: store i64 8, i64* [[TMP129]], align 8
// CHECK17-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6
// CHECK17-NEXT: store i8* null, i8** [[TMP130]], align 8
// CHECK17-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7
// CHECK17-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double**
// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP132]], align 8
// CHECK17-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7
// CHECK17-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double**
// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8
// CHECK17-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK17-NEXT: store i64 [[TMP88]], i64* [[TMP135]], align 8
// CHECK17-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7
// CHECK17-NEXT: store i8* null, i8** [[TMP136]], align 8
// CHECK17-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8
// CHECK17-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT**
// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8
// CHECK17-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8
// CHECK17-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to %struct.TT**
// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP140]], align 8
// CHECK17-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK17-NEXT: store i64 16, i64* [[TMP141]], align 8
// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8
// CHECK17-NEXT: store i8* null, i8** [[TMP142]], align 8
// CHECK17-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9
// CHECK17-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64*
// CHECK17-NEXT: store i64 [[TMP84]], i64* [[TMP144]], align 8
// CHECK17-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9
// CHECK17-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64*
// CHECK17-NEXT: store i64 [[TMP84]], i64* [[TMP146]], align 8
// CHECK17-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
// CHECK17-NEXT: store i64 4, i64* [[TMP147]], align 8
// CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9
// CHECK17-NEXT: store i8* null, i8** [[TMP148]], align 8
// CHECK17-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK17-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK17-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK17-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK17-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0
// CHECK17-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]]
// CHECK17: omp_offload.failed23:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT24]]
// CHECK17: omp_offload.cont24:
// CHECK17-NEXT: br label [[OMP_IF_END26:%.*]]
// CHECK17: omp_if.else25:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]]
// CHECK17-NEXT: br label [[OMP_IF_END26]]
// CHECK17: omp_if.end26:
// CHECK17-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP155]])
// CHECK17-NEXT: ret i32 [[TMP154]]
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103
// CHECK17-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK17-NEXT: ret void
//
//
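// @.omp_outlined. is the parallel body of the line-103 region: a six
// iteration static loop (i = 3, 8, ..., 28) whose body exercises both
// __kmpc_cancel and __kmpc_cancellationpoint; both exit paths still reach
// __kmpc_for_static_fini.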
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK17-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK17-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK17-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK17-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK17: .cancel.exit:
// CHECK17-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK17: .cancel.continue:
// CHECK17-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK17-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK17-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK17: .cancel.exit2:
// CHECK17-NEXT: br label [[CANCEL_EXIT]]
// CHECK17: .cancel.continue3:
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK17-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK17-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK17: cancel.cont:
// CHECK17-NEXT: ret void
// CHECK17: cancel.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK17-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK17-SAME: (i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK17-NEXT: store i64 [[TMP2]], i64* [[K_CASTED]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK17-NEXT: ret void
//
//
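// @.omp_outlined..1 is the loop body for the line-110 region: k is linear
// (step 3 per iteration, with the +27 pullback stored to k after the last
// chunk), the loop is driven by __kmpc_dispatch_init_4/__kmpc_dispatch_next_4
// with schedule constant 1073741859 (0x40000023, which appears to encode a
// nonmonotonic dynamic schedule), and simd-safe accesses carry
// !llvm.access.group metadata. A minimal sketch of a construct with this
// shape (an assumption for illustration, not the test's verified source):
//   #pragma omp target parallel for simd schedule(dynamic) linear(k : 3)
//   for (int i = 10; i > 1; i--) { a += 1; }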
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[K1:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8
// CHECK17-NEXT: store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK17-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
// CHECK17-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1073741859, i32 0, i32 8, i32 1, i32 1)
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK17: omp.dispatch.cond:
// CHECK17-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK17-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0
// CHECK17-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK17: omp.dispatch.body:
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK17-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK17-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !12
// CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !12
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK17-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3
// CHECK17-NEXT: [[CONV3:%.*]] = sext i32 [[MUL2]] to i64
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]]
// CHECK17-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !12
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !12
// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK17-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !12
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK17-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK17: omp.dispatch.inc:
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK17: omp.dispatch.end:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK17-NEXT: br i1 [[TMP13]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK17: .omp.linear.pu:
// CHECK17-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8
// CHECK17-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP14]], 27
// CHECK17-NEXT: store i64 [[ADD6]], i64* [[K_ADDR]], align 8
// CHECK17-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK17: .omp.linear.pu.done:
// CHECK17-NEXT: ret void
//
//
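// Target entry for the line-138 region: aa, lin, and a arrive as i64 values,
// are narrowed back through bitcast "addr" slots, and are forwarded to
// @.omp_outlined..2, which begins by capturing the linear starts for lin
// and a.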
label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10510 // CHECK17: omp.inner.for.body: 10511 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 10512 // CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 10513 // CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] 10514 // CHECK17-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !12 10515 // CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !12 10516 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 10517 // CHECK17-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3 10518 // CHECK17-NEXT: [[CONV3:%.*]] = sext i32 [[MUL2]] to i64 10519 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]] 10520 // CHECK17-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !12 10521 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !12 10522 // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1 10523 // CHECK17-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !12 10524 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 10525 // CHECK17: omp.body.continue: 10526 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10527 // CHECK17: omp.inner.for.inc: 10528 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 10529 // CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 10530 // CHECK17-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 10531 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] 10532 // CHECK17: omp.inner.for.end: 10533 // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 10534 // CHECK17: omp.dispatch.inc: 10535 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]] 10536 // CHECK17: omp.dispatch.end: 10537 // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10538 // CHECK17-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 10539 // CHECK17-NEXT: br i1 [[TMP13]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 10540 // CHECK17: .omp.linear.pu: 10541 // CHECK17-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8 10542 // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP14]], 27 10543 // CHECK17-NEXT: store i64 [[ADD6]], i64* [[K_ADDR]], align 8 10544 // CHECK17-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 10545 // CHECK17: .omp.linear.pu.done: 10546 // CHECK17-NEXT: ret void 10547 // 10548 // 10549 // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138 10550 // CHECK17-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR2]] { 10551 // CHECK17-NEXT: entry: 10552 // CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 10553 // CHECK17-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 10554 // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 10555 // CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 10556 // CHECK17-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 10557 // CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 10558 // CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 10559 // CHECK17-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 10560 // CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 10561 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 10562 // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 10563 // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 
// CHECK17-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR3]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[LIN4:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A5:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK17-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
// CHECK17-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK17-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK17-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK17-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK17-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK17-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK17-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK17-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK17-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK17-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
// CHECK17-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK17-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK17-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
// CHECK17-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
// CHECK17-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4
// CHECK17-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
// CHECK17-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK17-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK17-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
// CHECK17-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
// CHECK17-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4
// CHECK17-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK17-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
// CHECK17-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
// CHECK17-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
// CHECK17-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1
// CHECK17-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK17-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK17: .omp.linear.pu:
// CHECK17-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK17-NEXT: [[CONV18:%.*]] = sext i32 [[TMP20]] to i64
// CHECK17-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK17-NEXT: [[MUL19:%.*]] = mul i64 4, [[TMP21]]
// CHECK17-NEXT: [[ADD20:%.*]] = add i64 [[CONV18]], [[MUL19]]
// CHECK17-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD20]] to i32
// CHECK17-NEXT: store i32 [[CONV21]], i32* [[CONV1]], align 8
// CHECK17-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4
// CHECK17-NEXT: [[CONV22:%.*]] = sext i32 [[TMP22]] to i64
// CHECK17-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK17-NEXT: [[MUL23:%.*]] = mul i64 4, [[TMP23]]
// CHECK17-NEXT: [[ADD24:%.*]] = add i64 [[CONV22]], [[MUL23]]
// CHECK17-NEXT: [[CONV25:%.*]] = trunc i64 [[ADD24]] to i32
// CHECK17-NEXT: store i32 [[CONV25]], i32* [[CONV2]], align 8
// CHECK17-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK17: .omp.linear.pu.done:
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_task_privates_map.
// CHECK17-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i16** noalias [[TMP1:%.*]], [3 x i8*]** noalias [[TMP2:%.*]], [3 x i8*]** noalias [[TMP3:%.*]], [3 x i64]** noalias [[TMP4:%.*]]) #[[ATTR6:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
// CHECK17-NEXT: [[DOTADDR1:%.*]] = alloca i16**, align 8
// CHECK17-NEXT: [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 8
// CHECK17-NEXT: [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 8
// CHECK17-NEXT: [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 8
// CHECK17-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
// CHECK17-NEXT: store i16** [[TMP1]], i16*** [[DOTADDR1]], align 8
// CHECK17-NEXT: store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 8
// CHECK17-NEXT: store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 8
// CHECK17-NEXT: store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
// CHECK17-NEXT: [[TMP7:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 8
// CHECK17-NEXT: store [3 x i8*]* [[TMP6]], [3 x i8*]** [[TMP7]], align 8
// CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
// CHECK17-NEXT: [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 8
// CHECK17-NEXT: store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
// CHECK17-NEXT: [[TMP11:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 8
// CHECK17-NEXT: store [3 x i64]* [[TMP10]], [3 x i64]** [[TMP11]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
// CHECK17-NEXT: [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 8
// CHECK17-NEXT: store i16* [[TMP12]], i16** [[TMP13]], align 8
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK17-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK17-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
// CHECK17-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
// CHECK17-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
// CHECK17-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 8
// CHECK17-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 8
// CHECK17-NEXT: [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 8
// CHECK17-NEXT: [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 8
// CHECK17-NEXT: [[AA_CASTED_I:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[LIN_CASTED_I:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_CASTED_I:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK17-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK17-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK17-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK17-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK17-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK17-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]])
// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]])
// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
// CHECK17-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
// CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24
// CHECK17-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !24
// CHECK17-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24
// CHECK17-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24
// CHECK17-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !24
// CHECK17-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !24
// CHECK17-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !24
// CHECK17-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24
// CHECK17-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24
// CHECK17-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
// CHECK17-NEXT: call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
// CHECK17-NEXT: [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !24
// CHECK17-NEXT: [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !24
// CHECK17-NEXT: [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !24
// CHECK17-NEXT: [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !24
// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i64 0, i64 0
// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i64 0, i64 0
// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i64 0, i64 0
// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK17-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK17-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK17-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]]
// CHECK17: omp_offload.failed.i:
// CHECK17-NEXT: [[TMP27:%.*]] = load i16, i16* [[TMP16]], align 2
// CHECK17-NEXT: [[CONV_I:%.*]] = bitcast i64* [[AA_CASTED_I]] to i16*
// CHECK17-NEXT: store i16 [[TMP27]], i16* [[CONV_I]], align 2, !noalias !24
// CHECK17-NEXT: [[TMP28:%.*]] = load i64, i64* [[AA_CASTED_I]], align 8, !noalias !24
// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK17-NEXT: [[CONV4_I:%.*]] = bitcast i64* [[LIN_CASTED_I]] to i32*
// CHECK17-NEXT: store i32 [[TMP29]], i32* [[CONV4_I]], align 4, !noalias !24
// CHECK17-NEXT: [[TMP30:%.*]] = load i64, i64* [[LIN_CASTED_I]], align 8, !noalias !24
// CHECK17-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP24]], align 4
// CHECK17-NEXT: [[CONV5_I:%.*]] = bitcast i64* [[A_CASTED_I]] to i32*
// CHECK17-NEXT: store i32 [[TMP31]], i32* [[CONV5_I]], align 4, !noalias !24
// CHECK17-NEXT: [[TMP32:%.*]] = load i64, i64* [[A_CASTED_I]], align 8, !noalias !24
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138(i64 [[TMP28]], i64 [[TMP30]], i64 [[TMP32]]) #[[ATTR4]]
// CHECK17-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]]
// CHECK17: .omp_outlined..3.exit:
// CHECK17-NEXT: ret i32 0
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146
// CHECK17-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR3]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK17-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK17-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK17-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK17-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK17-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK17-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170
// CHECK17-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK17-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK17-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK17-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK17-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK17-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK17-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK17: omp.dispatch.cond:
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK17-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK17-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK17: omp.dispatch.body:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK17-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK17-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK17-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK17-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1
// CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK17-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
11039 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2 11040 // CHECK17-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4 11041 // CHECK17-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double 11042 // CHECK17-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00 11043 // CHECK17-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float 11044 // CHECK17-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4 11045 // CHECK17-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3 11046 // CHECK17-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4 11047 // CHECK17-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double 11048 // CHECK17-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00 11049 // CHECK17-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float 11050 // CHECK17-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4 11051 // CHECK17-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1 11052 // CHECK17-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2 11053 // CHECK17-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8 11054 // CHECK17-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00 11055 // CHECK17-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8 11056 // CHECK17-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] 11057 // CHECK17-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]] 11058 // CHECK17-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3 11059 // CHECK17-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8 11060 // CHECK17-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00 11061 // CHECK17-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8 11062 // CHECK17-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 11063 // CHECK17-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8 11064 // CHECK17-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 11065 // CHECK17-NEXT: store i64 [[ADD22]], i64* [[X]], align 8 11066 // CHECK17-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 11067 // CHECK17-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8 11068 // CHECK17-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 11069 // CHECK17-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 11070 // CHECK17-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 11071 // CHECK17-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8 11072 // CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 11073 // CHECK17: omp.body.continue: 11074 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11075 // CHECK17: omp.inner.for.inc: 11076 // CHECK17-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11077 // CHECK17-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 11078 // CHECK17-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4 11079 // CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]] 11080 // CHECK17: omp.inner.for.end: 11081 // CHECK17-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 11082 // CHECK17: omp.dispatch.inc: 11083 // CHECK17-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 11084 // CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 11085 // CHECK17-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 11086 // 
CHECK17-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4 11087 // CHECK17-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11088 // CHECK17-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 11089 // CHECK17-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 11090 // CHECK17-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4 11091 // CHECK17-NEXT: br label [[OMP_DISPATCH_COND]] 11092 // CHECK17: omp.dispatch.end: 11093 // CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 11094 // CHECK17-NEXT: ret void 11095 // 11096 // 11097 // CHECK17-LABEL: define {{[^@]+}}@_Z3bari 11098 // CHECK17-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 11099 // CHECK17-NEXT: entry: 11100 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 11101 // CHECK17-NEXT: [[A:%.*]] = alloca i32, align 4 11102 // CHECK17-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 11103 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 11104 // CHECK17-NEXT: store i32 0, i32* [[A]], align 4 11105 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 11106 // CHECK17-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]]) 11107 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 11108 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 11109 // CHECK17-NEXT: store i32 [[ADD]], i32* [[A]], align 4 11110 // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 11111 // CHECK17-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]]) 11112 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 11113 // CHECK17-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 11114 // CHECK17-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 11115 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 11116 // CHECK17-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]]) 11117 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 11118 // CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 11119 // CHECK17-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 11120 // CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 11121 // CHECK17-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]]) 11122 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 11123 // CHECK17-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 11124 // CHECK17-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 11125 // CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 11126 // CHECK17-NEXT: ret i32 [[TMP8]] 11127 // 11128 // 11129 // CHECK17-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 11130 // CHECK17-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 11131 // CHECK17-NEXT: entry: 11132 // CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 11133 // CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 11134 // CHECK17-NEXT: [[B:%.*]] = alloca i32, align 4 11135 // CHECK17-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 11136 // CHECK17-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 11137 // CHECK17-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 11138 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 11139 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 11140 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 11141 // CHECK17-NEXT: 
[[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 11142 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 11143 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 11144 // CHECK17-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 11145 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 11146 // CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 11147 // CHECK17-NEXT: store i32 [[ADD]], i32* [[B]], align 4 11148 // CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 11149 // CHECK17-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 11150 // CHECK17-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 11151 // CHECK17-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 11152 // CHECK17-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] 11153 // CHECK17-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 11154 // CHECK17-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 11155 // CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4 11156 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32* 11157 // CHECK17-NEXT: store i32 [[TMP5]], i32* [[CONV]], align 4 11158 // CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 11159 // CHECK17-NEXT: [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4 11160 // CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 11161 // CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 11162 // CHECK17: omp_if.then: 11163 // CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 11164 // CHECK17-NEXT: [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]] 11165 // CHECK17-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2 11166 // CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 11167 // CHECK17-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1** 11168 // CHECK17-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8 11169 // CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 11170 // CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** 11171 // CHECK17-NEXT: store double* [[A]], double** [[TMP13]], align 8 11172 // CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 11173 // CHECK17-NEXT: store i64 8, i64* [[TMP14]], align 8 11174 // CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 11175 // CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 8 11176 // CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 11177 // CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 11178 // CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 11179 // CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 11180 // CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* 11181 // CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 11182 // CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 11183 // CHECK17-NEXT: store i64 4, i64* [[TMP20]], align 8 11184 // CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 11185 // CHECK17-NEXT: store i8* null, i8** [[TMP21]], align 8 11186 // 
CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 11187 // CHECK17-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* 11188 // CHECK17-NEXT: store i64 2, i64* [[TMP23]], align 8 11189 // CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 11190 // CHECK17-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* 11191 // CHECK17-NEXT: store i64 2, i64* [[TMP25]], align 8 11192 // CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 11193 // CHECK17-NEXT: store i64 8, i64* [[TMP26]], align 8 11194 // CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 11195 // CHECK17-NEXT: store i8* null, i8** [[TMP27]], align 8 11196 // CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 11197 // CHECK17-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* 11198 // CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 11199 // CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 11200 // CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* 11201 // CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 11202 // CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 11203 // CHECK17-NEXT: store i64 8, i64* [[TMP32]], align 8 11204 // CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 11205 // CHECK17-NEXT: store i8* null, i8** [[TMP33]], align 8 11206 // CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 11207 // CHECK17-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** 11208 // CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 11209 // CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 11210 // CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** 11211 // CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 11212 // CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 11213 // CHECK17-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 11214 // CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 11215 // CHECK17-NEXT: store i8* null, i8** [[TMP39]], align 8 11216 // CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 11217 // CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 11218 // CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 11219 // CHECK17-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 11220 // CHECK17-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 11221 // CHECK17-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 11222 // CHECK17: omp_offload.failed: 
// CHECK17: omp_offload.failed:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK17: omp_offload.cont:
// CHECK17-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK17: omp_if.else:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]]
// CHECK17-NEXT: br label [[OMP_IF_END]]
// CHECK17: omp_if.end:
// CHECK17-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]]
// CHECK17-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK17-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
// CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32
// CHECK17-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4
// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]]
// CHECK17-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP48]])
// CHECK17-NEXT: ret i32 [[ADD4]]
//
//
// CHECK17-LABEL: define {{[^@]+}}@_ZL7fstatici
// CHECK17-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK17-NEXT: [[AAA:%.*]] = alloca i8, align 1
// CHECK17-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8
// CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK17-NEXT: store i32 0, i32* [[A]], align 4
// CHECK17-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK17-NEXT: store i8 0, i8* [[AAA]], align 1
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK17-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK17-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK17: omp_if.then:
// CHECK17-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8
// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP11]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64*
// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8
// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP16]], align 8
// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64*
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8
// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8
// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP21]], align 8
// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
// CHECK17-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8
// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
// CHECK17-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8
// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
// CHECK17-NEXT: store i8* null, i8** [[TMP26]], align 8
// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK17-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
// CHECK17-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK17: omp_offload.failed:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK17: omp_offload.cont:
// CHECK17-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK17: omp_if.else:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK17-NEXT: br label [[OMP_IF_END]]
// CHECK17: omp_if.end:
// CHECK17-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: ret i32 [[TMP31]]
//
//
// CHECK17-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i
// CHECK17-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK17-NEXT: [[B:%.*]] = alloca [10 x i32], align 4
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK17-NEXT: store i32 0, i32* [[A]], align 4
// CHECK17-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40
// CHECK17-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK17: omp_if.then:
// CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64*
// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK17-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64*
// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP11]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64*
// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8
// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK17-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8
// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK17-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]**
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8
// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK17-NEXT: store i8* null, i8** [[TMP19]], align 8
// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK17-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0
// CHECK17-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK17: omp_offload.failed:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK17: omp_offload.cont:
// CHECK17-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK17: omp_if.else:
// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]]
// CHECK17-NEXT: br label [[OMP_IF_END]]
// CHECK17: omp_if.end:
// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4
// CHECK17-NEXT: ret i32 [[TMP24]]
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242
// CHECK17-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK17-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK17-NEXT: ret void
//
//
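// NOTE (editor, not autogenerated): the outlined parallel body checked next is
// the canonical statically scheduled worksharing loop: the bounds are seeded,
// __kmpc_for_static_init_8u carves out this thread's chunk (the schedule
// constant 34 should correspond to kmp_sch_static in kmp.h), the upper bound
// is clamped to the sequential trip count, and __kmpc_for_static_fini closes
// the region. A rough C++ rendering under those assumptions, kept in comments
// so the RUN lines still compile this file as-is:
//
//   uint64_t lb = 0, ub = 3, stride = 1; int32_t last = 0;
//   __kmpc_for_static_init_8u(&loc, tid, 34, &last, &lb, &ub, &stride, 1, 1);
//   if (ub > 3) ub = 3;                    // clamp to the sequential bound
//   for (uint64_t iv = lb; iv <= ub; ++iv) {
//     // body: it = 2000 - iv*400; this->a = b + 1.5; ++this->a;
//     // c[1][1] = (short)this->a;
//   }
//   __kmpc_for_static_fini(&loc, tid);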
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK17-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK17-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK17-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK17-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK17-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK17-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK17-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK17-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
// CHECK17-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK17-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK17-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK17-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8
// CHECK17-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK17-NEXT: store double [[INC]], double* [[A5]], align 8
// CHECK17-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16
// CHECK17-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
// CHECK17-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK17-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1
// CHECK17-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224
// CHECK17-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK17-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207
// CHECK17-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK17-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14
// CHECK17-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK17-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK17-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK17-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK17-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK17-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK17-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK17-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK17-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK17-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK17-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK17-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK17-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK17-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK17: cond.true:
// CHECK17-NEXT: br label [[COND_END:%.*]]
// CHECK17: cond.false:
// CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: br label [[COND_END]]
// CHECK17: cond.end:
// CHECK17-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK17-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK17-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK17: omp.inner.for.cond:
// CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK17-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK17-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK17: omp.inner.for.body:
// CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK17-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK17-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK17-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK17-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8
// CHECK17-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK17-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
// CHECK17-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK17-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK17-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8
// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK17-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4
// CHECK17-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK17: omp.body.continue:
// CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK17: omp.inner.for.inc:
// CHECK17-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK17-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8
// CHECK17-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK17: omp.inner.for.end:
// CHECK17-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK17: omp.loop.exit:
// CHECK17-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK17-NEXT: ret void
//
//
// CHECK17-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK17-SAME: () #[[ATTR6]] {
// CHECK17-NEXT: entry:
// CHECK17-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK17-NEXT: ret void
//
//
// CHECK18-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK18-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: ret i64 0
//
//
// CHECK18-LABEL: define {{[^@]+}}@_Z3fooi
// CHECK18-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[AA:%.*]] = alloca i16, align 2
// CHECK18-NEXT: [[B:%.*]] = alloca [10 x float], align 4
// CHECK18-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK18-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8
// CHECK18-NEXT: [[__VLA_EXPR1:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 8
// CHECK18-NEXT: [[K:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[LIN:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[A_CASTED4:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK18-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4
// CHECK18-NEXT: [[A_CASTED6:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[AA_CASTED8:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [2 x i8*], align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK18-NEXT: [[A_CASTED15:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8
// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8
// CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK18-NEXT: store i32 0, i32* [[A]], align 4
// CHECK18-NEXT: store i16 0, i16* [[AA]], align 2
// CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK18-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave()
// CHECK18-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8
// CHECK18-NEXT: [[VLA:%.*]] = alloca float, i64 [[TMP2]], align 4
// CHECK18-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8
// CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[TMP5:%.*]] = zext i32 [[TMP4]] to i64
// CHECK18-NEXT: [[TMP6:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK18-NEXT: [[VLA1:%.*]] = alloca double, i64 [[TMP6]], align 8
// CHECK18-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR1]], align 8
// CHECK18-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0)
// CHECK18-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK18-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK18: omp_offload.failed:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103() #[[ATTR4:[0-9]+]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK18: omp_offload.cont:
// CHECK18-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK18-NEXT: store i64 [[CALL]], i64* [[K]], align 8
// CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP9]], i32* [[CONV]], align 4
// CHECK18-NEXT: [[TMP10:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK18-NEXT: [[TMP11:%.*]] = load i64, i64* [[K]], align 8
// CHECK18-NEXT: store i64 [[TMP11]], i64* [[K_CASTED]], align 8
// CHECK18-NEXT: [[TMP12:%.*]] = load i64, i64* [[K_CASTED]], align 8
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i64 [[TMP10]], i64 [[TMP12]]) #[[ATTR4]]
// CHECK18-NEXT: store i32 12, i32* [[LIN]], align 4
// CHECK18-NEXT: [[TMP13:%.*]] = load i16, i16* [[AA]], align 2
// CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK18-NEXT: store i16 [[TMP13]], i16* [[CONV2]], align 2
// CHECK18-NEXT: [[TMP14:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP15]], i32* [[CONV3]], align 4
// CHECK18-NEXT: [[TMP16:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED4]] to i32*
// CHECK18-NEXT: store i32 [[TMP17]], i32* [[CONV5]], align 4
// CHECK18-NEXT: [[TMP18:%.*]] = load i64, i64* [[A_CASTED4]], align 8
// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64*
// CHECK18-NEXT: store i64 [[TMP14]], i64* [[TMP20]], align 8
// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64*
// CHECK18-NEXT: store i64 [[TMP14]], i64* [[TMP22]], align 8
// CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP23]], align 8
// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK18-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64*
// CHECK18-NEXT: store i64 [[TMP16]], i64* [[TMP25]], align 8
// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK18-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64*
// CHECK18-NEXT: store i64 [[TMP16]], i64* [[TMP27]], align 8
// CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP28]], align 8
// CHECK18-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK18-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64*
// CHECK18-NEXT: store i64 [[TMP18]], i64* [[TMP30]], align 8
// CHECK18-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK18-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64*
// CHECK18-NEXT: store i64 [[TMP18]], i64* [[TMP32]], align 8
// CHECK18-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK18-NEXT: store i8* null, i8** [[TMP33]], align 8
// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK18-NEXT: [[TMP37:%.*]] = load i16, i16* [[AA]], align 2
// CHECK18-NEXT: store i16 [[TMP37]], i16* [[TMP36]], align 4
// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK18-NEXT: [[TMP39:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK18-NEXT: store i32 [[TMP39]], i32* [[TMP38]], align 4
// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK18-NEXT: [[TMP41:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: store i32 [[TMP41]], i32* [[TMP40]], align 4
// CHECK18-NEXT: [[TMP42:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i64 120, i64 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK18-NEXT: [[TMP43:%.*]] = bitcast i8* [[TMP42]] to %struct.kmp_task_t_with_privates*
// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP43]], i32 0, i32 0
// CHECK18-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP44]], i32 0, i32 0
// CHECK18-NEXT: [[TMP46:%.*]] = load i8*, i8** [[TMP45]], align 8
// CHECK18-NEXT: [[TMP47:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK18-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP46]], i8* align 4 [[TMP47]], i64 12, i1 false)
// CHECK18-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP43]], i32 0, i32 1
// CHECK18-NEXT: [[TMP49:%.*]] = bitcast i8* [[TMP46]] to %struct.anon*
// CHECK18-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 0
// CHECK18-NEXT: [[TMP51:%.*]] = bitcast [3 x i8*]* [[TMP50]] to i8*
// CHECK18-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP34]] to i8*
// CHECK18-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP51]], i8* align 8 [[TMP52]], i64 24, i1 false)
// CHECK18-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 1
// CHECK18-NEXT: [[TMP54:%.*]] = bitcast [3 x i8*]* [[TMP53]] to i8*
// CHECK18-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP35]] to i8*
// CHECK18-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP54]], i8* align 8 [[TMP55]], i64 24, i1 false)
// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 2
// CHECK18-NEXT: [[TMP57:%.*]] = bitcast [3 x i64]* [[TMP56]] to i8*
// CHECK18-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP57]], i8* align 8 bitcast ([3 x i64]* @.offload_sizes to i8*), i64 24, i1 false)
// CHECK18-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP48]], i32 0, i32 3
// CHECK18-NEXT: [[TMP59:%.*]] = load i16, i16* [[AA]], align 2
// CHECK18-NEXT: store i16 [[TMP59]], i16* [[TMP58]], align 8
// CHECK18-NEXT: [[TMP60:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP42]])
// CHECK18-NEXT: [[TMP61:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[CONV7:%.*]] = bitcast i64* [[A_CASTED6]] to i32*
// CHECK18-NEXT: store i32 [[TMP61]], i32* [[CONV7]], align 4
// CHECK18-NEXT: [[TMP62:%.*]] = load i64, i64* [[A_CASTED6]], align 8
// CHECK18-NEXT: [[TMP63:%.*]] = load i16, i16* [[AA]], align 2
// CHECK18-NEXT: [[CONV9:%.*]] = bitcast i64* [[AA_CASTED8]] to i16*
// CHECK18-NEXT: store i16 [[TMP63]], i16* [[CONV9]], align 2
// CHECK18-NEXT: [[TMP64:%.*]] = load i64, i64* [[AA_CASTED8]], align 8
// CHECK18-NEXT: [[TMP65:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP65]], 10
// CHECK18-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK18: omp_if.then:
// CHECK18-NEXT: [[TMP66:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK18-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64*
// CHECK18-NEXT: store i64 [[TMP62]], i64* [[TMP67]], align 8
// CHECK18-NEXT: [[TMP68:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK18-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64*
// CHECK18-NEXT: store i64 [[TMP62]], i64* [[TMP69]], align 8
// CHECK18-NEXT: [[TMP70:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP70]], align 8
// CHECK18-NEXT: [[TMP71:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 1
// CHECK18-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
// CHECK18-NEXT: store i64 [[TMP64]], i64* [[TMP72]], align 8
// CHECK18-NEXT: [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 1
// CHECK18-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i64*
// CHECK18-NEXT: store i64 [[TMP64]], i64* [[TMP74]], align 8
// CHECK18-NEXT: [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP75]], align 8
// CHECK18-NEXT: [[TMP76:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0
// CHECK18-NEXT: [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0
// CHECK18-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146.region_id, i32 2, i8** [[TMP76]], i8** [[TMP77]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK18-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
// CHECK18-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
// CHECK18: omp_offload.failed13:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i64 [[TMP62]], i64 [[TMP64]]) #[[ATTR4]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT14]]
// CHECK18: omp_offload.cont14:
// CHECK18-NEXT: br label [[OMP_IF_END:%.*]]
// CHECK18: omp_if.else:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i64 [[TMP62]], i64 [[TMP64]]) #[[ATTR4]]
// CHECK18-NEXT: br label [[OMP_IF_END]]
// CHECK18: omp_if.end:
// CHECK18-NEXT: [[TMP80:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: store i32 [[TMP80]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[TMP81:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[CONV16:%.*]] = bitcast i64* [[A_CASTED15]] to i32*
// CHECK18-NEXT: store i32 [[TMP81]], i32* [[CONV16]], align 4
// CHECK18-NEXT: [[TMP82:%.*]] = load i64, i64* [[A_CASTED15]], align 8
// CHECK18-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK18-NEXT: [[CONV17:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK18-NEXT: store i32 [[TMP83]], i32* [[CONV17]], align 4
// CHECK18-NEXT: [[TMP84:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK18-NEXT: [[TMP85:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK18-NEXT: [[CMP18:%.*]] = icmp sgt i32 [[TMP85]], 20
// CHECK18-NEXT: br i1 [[CMP18]], label [[OMP_IF_THEN19:%.*]], label [[OMP_IF_ELSE25:%.*]]
// CHECK18: omp_if.then19:
// CHECK18-NEXT: [[TMP86:%.*]] = mul nuw i64 [[TMP2]], 4
// CHECK18-NEXT: [[TMP87:%.*]] = mul nuw i64 5, [[TMP5]]
// CHECK18-NEXT: [[TMP88:%.*]] = mul nuw i64 [[TMP87]], 8
// CHECK18-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK18-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64*
// CHECK18-NEXT: store i64 [[TMP82]], i64* [[TMP90]], align 8
// CHECK18-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK18-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64*
// CHECK18-NEXT: store i64 [[TMP82]], i64* [[TMP92]], align 8
// CHECK18-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK18-NEXT: store i64 4, i64* [[TMP93]], align 8
// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0
// CHECK18-NEXT: store i8* null, i8** [[TMP94]], align 8
// CHECK18-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1
// CHECK18-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]**
// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 8
// CHECK18-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1
// CHECK18-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to [10 x float]**
// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP98]], align 8
// CHECK18-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK18-NEXT: store i64 40, i64* [[TMP99]], align 8
// CHECK18-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1
// CHECK18-NEXT: store i8* null, i8** [[TMP100]], align 8
// CHECK18-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2
// CHECK18-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i64*
// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP102]], align 8
// CHECK18-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2
// CHECK18-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i64*
// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP104]], align 8
// CHECK18-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK18-NEXT: store i64 8, i64* [[TMP105]], align 8
// CHECK18-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2
// CHECK18-NEXT: store i8* null, i8** [[TMP106]], align 8
// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3
// CHECK18-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float**
// CHECK18-NEXT: store float* [[VLA]], float** [[TMP108]], align 8
// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3
// CHECK18-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to float**
// CHECK18-NEXT: store float* [[VLA]], float** [[TMP110]], align 8
// CHECK18-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK18-NEXT: store i64 [[TMP86]], i64* [[TMP111]], align 8
// CHECK18-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3
// CHECK18-NEXT: store i8* null, i8** [[TMP112]], align 8
// CHECK18-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4
// CHECK18-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]**
// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 8
// CHECK18-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4
// CHECK18-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]**
// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8
// CHECK18-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK18-NEXT: store i64 400, i64* [[TMP117]], align 8
// CHECK18-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4
// CHECK18-NEXT: store i8* null, i8** [[TMP118]], align 8
// CHECK18-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5
// CHECK18-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64*
// CHECK18-NEXT: store i64 5, i64* [[TMP120]], align 8
// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5
// CHECK18-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64*
// CHECK18-NEXT: store i64 5, i64* [[TMP122]], align 8
// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK18-NEXT: store i64 8, i64* [[TMP123]], align 8
// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5
// CHECK18-NEXT: store i8* null, i8** [[TMP124]], align 8
// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6
// CHECK18-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64*
// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8
// CHECK18-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6
// CHECK18-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64*
// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8
// CHECK18-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK18-NEXT: store i64 8, i64* [[TMP129]], align 8
// CHECK18-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6
// CHECK18-NEXT: store i8* null, i8** [[TMP130]], align 8
// CHECK18-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7
// CHECK18-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double**
// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP132]], align 8
// CHECK18-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7
// CHECK18-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double**
// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8
// CHECK18-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK18-NEXT: store i64 [[TMP88]], i64* [[TMP135]], align 8
// CHECK18-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7
// CHECK18-NEXT: store i8* null, i8** [[TMP136]], align 8
// CHECK18-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8
// CHECK18-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT**
// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8
// CHECK18-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8
// CHECK18-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to %struct.TT**
// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP140]], align 8
// CHECK18-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK18-NEXT: store i64 16, i64* [[TMP141]], align 8
// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8
// CHECK18-NEXT: store i8* null, i8** [[TMP142]], align 8
// CHECK18-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9
// CHECK18-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64*
// CHECK18-NEXT: store i64 [[TMP84]], i64* [[TMP144]], align 8
// CHECK18-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9
// CHECK18-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64*
// CHECK18-NEXT: store i64 [[TMP84]], i64* [[TMP146]], align 8
// CHECK18-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
// CHECK18-NEXT: store i64 4, i64* [[TMP147]], align 8
// CHECK18-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9
// CHECK18-NEXT: store i8* null, i8** [[TMP148]], align 8
// CHECK18-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
// CHECK18-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
// CHECK18-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK18-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK18-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0
// CHECK18-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]]
// CHECK18: omp_offload.failed23:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]]
// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT24]]
// CHECK18: omp_offload.cont24:
// CHECK18-NEXT: br label [[OMP_IF_END26:%.*]]
// CHECK18: omp_if.else25:
// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]]
// CHECK18-NEXT: br label [[OMP_IF_END26]]
// CHECK18: omp_if.end26:
// CHECK18-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4
// CHECK18-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP155]])
// CHECK18-NEXT: ret i32 [[TMP154]]
//
//
// CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103
// CHECK18-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK18-NEXT: entry:
// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK18-NEXT: ret void
//
//
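// NOTE (editor, not autogenerated): the outlined body checked next combines
// the static worksharing pattern with cancellation. Both __kmpc_cancel and
// __kmpc_cancellationpoint return nonzero when the construct was cancelled,
// and the cancel path still runs __kmpc_for_static_fini before leaving the
// region. Shape only, assuming the cancel-kind constant 2 is the worksharing
// loop kind as in kmp.h:
//
//   for (int32_t iv = lb; iv <= ub; ++iv) {
//     i = 3 + iv * 5;
//     if (__kmpc_cancel(&loc, tid, 2) != 0)             // #pragma omp cancel for
//       goto cancel_exit;
//     if (__kmpc_cancellationpoint(&loc, tid, 2) != 0)  // #pragma omp cancellation point for
//       goto cancel_exit;
//   }
//   __kmpc_for_static_fini(&loc, tid);
//   return;
// cancel_exit:
//   __kmpc_for_static_fini(&loc, tid);  // fini still runs on the cancel path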
12017 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] { 12018 // CHECK18-NEXT: entry: 12019 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12020 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12021 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12022 // CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4 12023 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 12024 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 12025 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 12026 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12027 // CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4 12028 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12029 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12030 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 12031 // CHECK18-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 12032 // CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12033 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12034 // CHECK18-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12035 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 12036 // CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12037 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12038 // CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 12039 // CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12040 // CHECK18: cond.true: 12041 // CHECK18-NEXT: br label [[COND_END:%.*]] 12042 // CHECK18: cond.false: 12043 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12044 // CHECK18-NEXT: br label [[COND_END]] 12045 // CHECK18: cond.end: 12046 // CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 12047 // CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 12048 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12049 // CHECK18-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 12050 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12051 // CHECK18: omp.inner.for.cond: 12052 // CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12053 // CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12054 // CHECK18-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 12055 // CHECK18-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12056 // CHECK18: omp.inner.for.body: 12057 // CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12058 // CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 12059 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 12060 // CHECK18-NEXT: store i32 [[ADD]], i32* [[I]], align 4 12061 // CHECK18-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2) 12062 // CHECK18-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 12063 // CHECK18-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] 12064 // CHECK18: .cancel.exit: 12065 // CHECK18-NEXT: br label [[CANCEL_EXIT:%.*]] 12066 // CHECK18: .cancel.continue: 12067 // CHECK18-NEXT: [[TMP10:%.*]] = call i32 
@__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2) 12068 // CHECK18-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 12069 // CHECK18-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]] 12070 // CHECK18: .cancel.exit2: 12071 // CHECK18-NEXT: br label [[CANCEL_EXIT]] 12072 // CHECK18: .cancel.continue3: 12073 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 12074 // CHECK18: omp.body.continue: 12075 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12076 // CHECK18: omp.inner.for.inc: 12077 // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12078 // CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 12079 // CHECK18-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4 12080 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]] 12081 // CHECK18: omp.inner.for.end: 12082 // CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12083 // CHECK18: omp.loop.exit: 12084 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 12085 // CHECK18-NEXT: br label [[CANCEL_CONT:%.*]] 12086 // CHECK18: cancel.cont: 12087 // CHECK18-NEXT: ret void 12088 // CHECK18: cancel.exit: 12089 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 12090 // CHECK18-NEXT: br label [[CANCEL_CONT]] 12091 // 12092 // 12093 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110 12094 // CHECK18-SAME: (i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] { 12095 // CHECK18-NEXT: entry: 12096 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 12097 // CHECK18-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8 12098 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 12099 // CHECK18-NEXT: [[K_CASTED:%.*]] = alloca i64, align 8 12100 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 12101 // CHECK18-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8 12102 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 12103 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8 12104 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[A_CASTED]] to i32* 12105 // CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 12106 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 12107 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[K_ADDR]], align 8 12108 // CHECK18-NEXT: store i64 [[TMP2]], i64* [[K_CASTED]], align 8 12109 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[K_CASTED]], align 8 12110 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) 12111 // CHECK18-NEXT: ret void 12112 // 12113 // 12114 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..1 12115 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[K:%.*]]) #[[ATTR3]] { 12116 // CHECK18-NEXT: entry: 12117 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12118 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12119 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 12120 // CHECK18-NEXT: [[K_ADDR:%.*]] = alloca i64, align 8 12121 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12122 // CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4 12123 // CHECK18-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8 12124 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 12125 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 12126 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 12127 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12128 // CHECK18-NEXT: [[I:%.*]] = alloca i32, align 4 12129 // CHECK18-NEXT: [[K1:%.*]] = alloca i64, align 8 12130 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12131 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12132 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 12133 // CHECK18-NEXT: store i64 [[K]], i64* [[K_ADDR]], align 8 12134 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 12135 // CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[K_ADDR]], align 8 12136 // CHECK18-NEXT: store i64 [[TMP0]], i64* [[DOTLINEAR_START]], align 8 12137 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 12138 // CHECK18-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4 12139 // CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12140 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12141 // CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12142 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 12143 // CHECK18-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]]) 12144 // CHECK18-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32 1073741859, i32 0, i32 8, i32 1, i32 1) 12145 // CHECK18-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 12146 // CHECK18: omp.dispatch.cond: 12147 // CHECK18-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP2]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]]) 12148 // CHECK18-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP3]], 0 12149 // CHECK18-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 12150 // CHECK18: omp.dispatch.body: 12151 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12152 // CHECK18-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 12153 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12154 // CHECK18: omp.inner.for.cond: 12155 // CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 12156 // CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12 12157 // CHECK18-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 12158 // CHECK18-NEXT: br i1 [[CMP]], 
label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12159 // CHECK18: omp.inner.for.body: 12160 // CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 12161 // CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1 12162 // CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]] 12163 // CHECK18-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !12 12164 // CHECK18-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !12 12165 // CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 12166 // CHECK18-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP9]], 3 12167 // CHECK18-NEXT: [[CONV3:%.*]] = sext i32 [[MUL2]] to i64 12168 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP8]], [[CONV3]] 12169 // CHECK18-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !12 12170 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV]], align 8, !llvm.access.group !12 12171 // CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], 1 12172 // CHECK18-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8, !llvm.access.group !12 12173 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 12174 // CHECK18: omp.body.continue: 12175 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12176 // CHECK18: omp.inner.for.inc: 12177 // CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 12178 // CHECK18-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 12179 // CHECK18-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 12180 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] 12181 // CHECK18: omp.inner.for.end: 12182 // CHECK18-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 12183 // CHECK18: omp.dispatch.inc: 12184 // CHECK18-NEXT: br label [[OMP_DISPATCH_COND]] 12185 // CHECK18: omp.dispatch.end: 12186 // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 12187 // CHECK18-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0 12188 // CHECK18-NEXT: br i1 [[TMP13]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 12189 // CHECK18: .omp.linear.pu: 12190 // CHECK18-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8 12191 // CHECK18-NEXT: [[ADD6:%.*]] = add nsw i64 [[TMP14]], 27 12192 // CHECK18-NEXT: store i64 [[ADD6]], i64* [[K_ADDR]], align 8 12193 // CHECK18-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 12194 // CHECK18: .omp.linear.pu.done: 12195 // CHECK18-NEXT: ret void 12196 // 12197 // 12198 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138 12199 // CHECK18-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR2]] { 12200 // CHECK18-NEXT: entry: 12201 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 12202 // CHECK18-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 12203 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 12204 // CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 12205 // CHECK18-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 12206 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 12207 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 12208 // CHECK18-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 12209 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 12210 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 12211 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 12212 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 
12213 // CHECK18-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8 12214 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 12215 // CHECK18-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 12216 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 12217 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 12218 // CHECK18-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* 12219 // CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4 12220 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 12221 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8 12222 // CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32* 12223 // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4 12224 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8 12225 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) 12226 // CHECK18-NEXT: ret void 12227 // 12228 // 12229 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..2 12230 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR3]] { 12231 // CHECK18-NEXT: entry: 12232 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12233 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12234 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 12235 // CHECK18-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 12236 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 12237 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 12238 // CHECK18-NEXT: [[TMP:%.*]] = alloca i64, align 8 12239 // CHECK18-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 12240 // CHECK18-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4 12241 // CHECK18-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 12242 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 12243 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 12244 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 12245 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12246 // CHECK18-NEXT: [[IT:%.*]] = alloca i64, align 8 12247 // CHECK18-NEXT: [[LIN4:%.*]] = alloca i32, align 4 12248 // CHECK18-NEXT: [[A5:%.*]] = alloca i32, align 4 12249 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12250 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12251 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 12252 // CHECK18-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 12253 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 12254 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 12255 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 12256 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 12257 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8 12258 // CHECK18-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 12259 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8 12260 // CHECK18-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4 12261 // CHECK18-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 12262 // CHECK18-NEXT: store i64 [[CALL]], 
i64* [[DOTLINEAR_STEP]], align 8 12263 // CHECK18-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 12264 // CHECK18-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 12265 // CHECK18-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 12266 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12267 // CHECK18-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12268 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 12269 // CHECK18-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 12270 // CHECK18-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 12271 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 12272 // CHECK18-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 12273 // CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12274 // CHECK18: cond.true: 12275 // CHECK18-NEXT: br label [[COND_END:%.*]] 12276 // CHECK18: cond.false: 12277 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 12278 // CHECK18-NEXT: br label [[COND_END]] 12279 // CHECK18: cond.end: 12280 // CHECK18-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 12281 // CHECK18-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 12282 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 12283 // CHECK18-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 12284 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12285 // CHECK18: omp.inner.for.cond: 12286 // CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 12287 // CHECK18-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 12288 // CHECK18-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 12289 // CHECK18-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12290 // CHECK18: omp.inner.for.body: 12291 // CHECK18-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 12292 // CHECK18-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 12293 // CHECK18-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 12294 // CHECK18-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 12295 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 12296 // CHECK18-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64 12297 // CHECK18-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 12298 // CHECK18-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 12299 // CHECK18-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]] 12300 // CHECK18-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]] 12301 // CHECK18-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32 12302 // CHECK18-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4 12303 // CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4 12304 // CHECK18-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64 12305 // CHECK18-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 12306 // CHECK18-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 12307 // CHECK18-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]] 12308 // CHECK18-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]] 12309 // CHECK18-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32 12310 // CHECK18-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4 12311 // CHECK18-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8 12312 // CHECK18-NEXT: [[CONV14:%.*]] = sext i16 
[[TMP16]] to i32 12313 // CHECK18-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1 12314 // CHECK18-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16 12315 // CHECK18-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8 12316 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 12317 // CHECK18: omp.body.continue: 12318 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12319 // CHECK18: omp.inner.for.inc: 12320 // CHECK18-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 12321 // CHECK18-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1 12322 // CHECK18-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8 12323 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]] 12324 // CHECK18: omp.inner.for.end: 12325 // CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12326 // CHECK18: omp.loop.exit: 12327 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 12328 // CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 12329 // CHECK18-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 12330 // CHECK18-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 12331 // CHECK18: .omp.linear.pu: 12332 // CHECK18-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 12333 // CHECK18-NEXT: [[CONV18:%.*]] = sext i32 [[TMP20]] to i64 12334 // CHECK18-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 12335 // CHECK18-NEXT: [[MUL19:%.*]] = mul i64 4, [[TMP21]] 12336 // CHECK18-NEXT: [[ADD20:%.*]] = add i64 [[CONV18]], [[MUL19]] 12337 // CHECK18-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD20]] to i32 12338 // CHECK18-NEXT: store i32 [[CONV21]], i32* [[CONV1]], align 8 12339 // CHECK18-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4 12340 // CHECK18-NEXT: [[CONV22:%.*]] = sext i32 [[TMP22]] to i64 12341 // CHECK18-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 12342 // CHECK18-NEXT: [[MUL23:%.*]] = mul i64 4, [[TMP23]] 12343 // CHECK18-NEXT: [[ADD24:%.*]] = add i64 [[CONV22]], [[MUL23]] 12344 // CHECK18-NEXT: [[CONV25:%.*]] = trunc i64 [[ADD24]] to i32 12345 // CHECK18-NEXT: store i32 [[CONV25]], i32* [[CONV2]], align 8 12346 // CHECK18-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 12347 // CHECK18: .omp.linear.pu.done: 12348 // CHECK18-NEXT: ret void 12349 // 12350 // 12351 // CHECK18-LABEL: define {{[^@]+}}@.omp_task_privates_map. 
12352 // CHECK18-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i16** noalias [[TMP1:%.*]], [3 x i8*]** noalias [[TMP2:%.*]], [3 x i8*]** noalias [[TMP3:%.*]], [3 x i64]** noalias [[TMP4:%.*]]) #[[ATTR6:[0-9]+]] { 12353 // CHECK18-NEXT: entry: 12354 // CHECK18-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8 12355 // CHECK18-NEXT: [[DOTADDR1:%.*]] = alloca i16**, align 8 12356 // CHECK18-NEXT: [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 8 12357 // CHECK18-NEXT: [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 8 12358 // CHECK18-NEXT: [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 8 12359 // CHECK18-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8 12360 // CHECK18-NEXT: store i16** [[TMP1]], i16*** [[DOTADDR1]], align 8 12361 // CHECK18-NEXT: store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 8 12362 // CHECK18-NEXT: store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 8 12363 // CHECK18-NEXT: store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 8 12364 // CHECK18-NEXT: [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8 12365 // CHECK18-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0 12366 // CHECK18-NEXT: [[TMP7:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 8 12367 // CHECK18-NEXT: store [3 x i8*]* [[TMP6]], [3 x i8*]** [[TMP7]], align 8 12368 // CHECK18-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1 12369 // CHECK18-NEXT: [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 8 12370 // CHECK18-NEXT: store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 8 12371 // CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2 12372 // CHECK18-NEXT: [[TMP11:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 8 12373 // CHECK18-NEXT: store [3 x i64]* [[TMP10]], [3 x i64]** [[TMP11]], align 8 12374 // CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3 12375 // CHECK18-NEXT: [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 8 12376 // CHECK18-NEXT: store i16* [[TMP12]], i16** [[TMP13]], align 8 12377 // CHECK18-NEXT: ret void 12378 // 12379 // 12380 // CHECK18-LABEL: define {{[^@]+}}@.omp_task_entry. 
12381 // CHECK18-SAME: (i32 signext [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] { 12382 // CHECK18-NEXT: entry: 12383 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4 12384 // CHECK18-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8 12385 // CHECK18-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8 12386 // CHECK18-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8 12387 // CHECK18-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8 12388 // CHECK18-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8 12389 // CHECK18-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 8 12390 // CHECK18-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 8 12391 // CHECK18-NEXT: [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 8 12392 // CHECK18-NEXT: [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 8 12393 // CHECK18-NEXT: [[AA_CASTED_I:%.*]] = alloca i64, align 8 12394 // CHECK18-NEXT: [[LIN_CASTED_I:%.*]] = alloca i64, align 8 12395 // CHECK18-NEXT: [[A_CASTED_I:%.*]] = alloca i64, align 8 12396 // CHECK18-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4 12397 // CHECK18-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8 12398 // CHECK18-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4 12399 // CHECK18-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 12400 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4 12401 // CHECK18-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 12402 // CHECK18-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0 12403 // CHECK18-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2 12404 // CHECK18-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0 12405 // CHECK18-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 12406 // CHECK18-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon* 12407 // CHECK18-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1 12408 // CHECK18-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8* 12409 // CHECK18-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8* 12410 // CHECK18-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META15:![0-9]+]]) 12411 // CHECK18-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META18:![0-9]+]]) 12412 // CHECK18-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]]) 12413 // CHECK18-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]]) 12414 // CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !24 12415 // CHECK18-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !24 12416 // CHECK18-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 12417 // CHECK18-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. 
to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 12418 // CHECK18-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !24 12419 // CHECK18-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !24 12420 // CHECK18-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !24 12421 // CHECK18-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !24 12422 // CHECK18-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !24 12423 // CHECK18-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* 12424 // CHECK18-NEXT: call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]] 12425 // CHECK18-NEXT: [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !24 12426 // CHECK18-NEXT: [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !24 12427 // CHECK18-NEXT: [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !24 12428 // CHECK18-NEXT: [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !24 12429 // CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i64 0, i64 0 12430 // CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i64 0, i64 0 12431 // CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i64 0, i64 0 12432 // CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1 12433 // CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2 12434 // CHECK18-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]] 12435 // CHECK18-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 12436 // CHECK18-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]] 12437 // CHECK18: omp_offload.failed.i: 12438 // CHECK18-NEXT: [[TMP27:%.*]] = load i16, i16* [[TMP16]], align 2 12439 // CHECK18-NEXT: [[CONV_I:%.*]] = bitcast i64* [[AA_CASTED_I]] to i16* 12440 // CHECK18-NEXT: store i16 [[TMP27]], i16* [[CONV_I]], align 2, !noalias !24 12441 // CHECK18-NEXT: [[TMP28:%.*]] = load i64, i64* [[AA_CASTED_I]], align 8, !noalias !24 12442 // CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP23]], align 4 12443 // CHECK18-NEXT: [[CONV4_I:%.*]] = bitcast i64* [[LIN_CASTED_I]] to i32* 12444 // CHECK18-NEXT: store i32 [[TMP29]], i32* [[CONV4_I]], align 4, !noalias !24 12445 // CHECK18-NEXT: [[TMP30:%.*]] = load i64, i64* [[LIN_CASTED_I]], align 8, !noalias !24 12446 // CHECK18-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP24]], align 4 12447 // CHECK18-NEXT: [[CONV5_I:%.*]] = bitcast i64* [[A_CASTED_I]] to i32* 12448 // CHECK18-NEXT: store i32 [[TMP31]], i32* [[CONV5_I]], align 4, !noalias !24 
12449 // CHECK18-NEXT: [[TMP32:%.*]] = load i64, i64* [[A_CASTED_I]], align 8, !noalias !24 12450 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138(i64 [[TMP28]], i64 [[TMP30]], i64 [[TMP32]]) #[[ATTR4]] 12451 // CHECK18-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]] 12452 // CHECK18: .omp_outlined..3.exit: 12453 // CHECK18-NEXT: ret i32 0 12454 // 12455 // 12456 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146 12457 // CHECK18-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR2]] { 12458 // CHECK18-NEXT: entry: 12459 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 12460 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 12461 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 12462 // CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 12463 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 12464 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 12465 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 12466 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 12467 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8 12468 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 12469 // CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4 12470 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 12471 // CHECK18-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8 12472 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 12473 // CHECK18-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2 12474 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 12475 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) 12476 // CHECK18-NEXT: ret void 12477 // 12478 // 12479 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..4 12480 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR3]] { 12481 // CHECK18-NEXT: entry: 12482 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12483 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12484 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 12485 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 12486 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12487 // CHECK18-NEXT: [[TMP:%.*]] = alloca i16, align 2 12488 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 12489 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 12490 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 12491 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12492 // CHECK18-NEXT: [[IT:%.*]] = alloca i16, align 2 12493 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12494 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12495 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 12496 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 12497 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 12498 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 12499 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 12500 // CHECK18-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 12501 // CHECK18-NEXT: store 
i32 1, i32* [[DOTOMP_STRIDE]], align 4 12502 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12503 // CHECK18-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12504 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 12505 // CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12506 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12507 // CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 12508 // CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12509 // CHECK18: cond.true: 12510 // CHECK18-NEXT: br label [[COND_END:%.*]] 12511 // CHECK18: cond.false: 12512 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12513 // CHECK18-NEXT: br label [[COND_END]] 12514 // CHECK18: cond.end: 12515 // CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 12516 // CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 12517 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12518 // CHECK18-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 12519 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12520 // CHECK18: omp.inner.for.cond: 12521 // CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12522 // CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12523 // CHECK18-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 12524 // CHECK18-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12525 // CHECK18: omp.inner.for.body: 12526 // CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12527 // CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 12528 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 12529 // CHECK18-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 12530 // CHECK18-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2 12531 // CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8 12532 // CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 12533 // CHECK18-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8 12534 // CHECK18-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8 12535 // CHECK18-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32 12536 // CHECK18-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1 12537 // CHECK18-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16 12538 // CHECK18-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8 12539 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 12540 // CHECK18: omp.body.continue: 12541 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12542 // CHECK18: omp.inner.for.inc: 12543 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12544 // CHECK18-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1 12545 // CHECK18-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 12546 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]] 12547 // CHECK18: omp.inner.for.end: 12548 // CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12549 // CHECK18: omp.loop.exit: 12550 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 12551 // CHECK18-NEXT: ret void 12552 // 12553 // 12554 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170 12555 // CHECK18-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull 
align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { 12556 // CHECK18-NEXT: entry: 12557 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 12558 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 12559 // CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 12560 // CHECK18-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 12561 // CHECK18-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 12562 // CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 12563 // CHECK18-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 12564 // CHECK18-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 12565 // CHECK18-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 12566 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 12567 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 12568 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 12569 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 12570 // CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 12571 // CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 12572 // CHECK18-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 12573 // CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 12574 // CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 12575 // CHECK18-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 12576 // CHECK18-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 12577 // CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 12578 // CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 12579 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 12580 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 12581 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 12582 // CHECK18-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 12583 // CHECK18-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 12584 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 12585 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 12586 // CHECK18-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 12587 // CHECK18-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 12588 // CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 12589 // CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8 12590 // CHECK18-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32* 12591 // CHECK18-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4 12592 // CHECK18-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8 12593 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8 12594 // CHECK18-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 12595 // CHECK18-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4 12596 // CHECK18-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 12597 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]]) 12598 // CHECK18-NEXT: ret void 12599 // 12600 // 12601 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..7 12602 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { 12603 // CHECK18-NEXT: entry: 12604 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12605 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12606 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 12607 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 12608 // CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 12609 // CHECK18-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 12610 // CHECK18-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 12611 // CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 12612 // CHECK18-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 12613 // CHECK18-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 12614 // CHECK18-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 12615 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 12616 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12617 // CHECK18-NEXT: [[TMP:%.*]] = alloca i8, align 1 12618 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 12619 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 12620 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 12621 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12622 // CHECK18-NEXT: [[IT:%.*]] = alloca i8, align 1 12623 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12624 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12625 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 12626 // CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 12627 // CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 12628 // CHECK18-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 12629 // CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 12630 // CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 12631 // CHECK18-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 12632 // CHECK18-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 12633 // CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 12634 // CHECK18-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 12635 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 12636 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 12637 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 12638 // CHECK18-NEXT: 
[[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 12639 // CHECK18-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 12640 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 12641 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 12642 // CHECK18-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 12643 // CHECK18-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 12644 // CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 12645 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 12646 // CHECK18-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 12647 // CHECK18-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12648 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12649 // CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8 12650 // CHECK18-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12651 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 12652 // CHECK18-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 12653 // CHECK18-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 12654 // CHECK18: omp.dispatch.cond: 12655 // CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12656 // CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 12657 // CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12658 // CHECK18: cond.true: 12659 // CHECK18-NEXT: br label [[COND_END:%.*]] 12660 // CHECK18: cond.false: 12661 // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12662 // CHECK18-NEXT: br label [[COND_END]] 12663 // CHECK18: cond.end: 12664 // CHECK18-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 12665 // CHECK18-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 12666 // CHECK18-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12667 // CHECK18-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 12668 // CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12669 // CHECK18-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12670 // CHECK18-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 12671 // CHECK18-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 12672 // CHECK18: omp.dispatch.body: 12673 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12674 // CHECK18: omp.inner.for.cond: 12675 // CHECK18-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12676 // CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12677 // CHECK18-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 12678 // CHECK18-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12679 // CHECK18: omp.inner.for.body: 12680 // CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12681 // CHECK18-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 12682 // CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 12683 // CHECK18-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8 12684 // CHECK18-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1 12685 // CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8 12686 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 12687 // CHECK18-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8 
12688 // CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2 12689 // CHECK18-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4 12690 // CHECK18-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double 12691 // CHECK18-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00 12692 // CHECK18-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float 12693 // CHECK18-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4 12694 // CHECK18-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3 12695 // CHECK18-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4 12696 // CHECK18-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double 12697 // CHECK18-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00 12698 // CHECK18-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float 12699 // CHECK18-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4 12700 // CHECK18-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1 12701 // CHECK18-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2 12702 // CHECK18-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8 12703 // CHECK18-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00 12704 // CHECK18-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8 12705 // CHECK18-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] 12706 // CHECK18-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]] 12707 // CHECK18-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3 12708 // CHECK18-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8 12709 // CHECK18-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00 12710 // CHECK18-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8 12711 // CHECK18-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 12712 // CHECK18-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8 12713 // CHECK18-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 12714 // CHECK18-NEXT: store i64 [[ADD22]], i64* [[X]], align 8 12715 // CHECK18-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 12716 // CHECK18-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8 12717 // CHECK18-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 12718 // CHECK18-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 12719 // CHECK18-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 12720 // CHECK18-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8 12721 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 12722 // CHECK18: omp.body.continue: 12723 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12724 // CHECK18: omp.inner.for.inc: 12725 // CHECK18-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12726 // CHECK18-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 12727 // CHECK18-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4 12728 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]] 12729 // CHECK18: omp.inner.for.end: 12730 // CHECK18-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 12731 // CHECK18: omp.dispatch.inc: 12732 // CHECK18-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12733 // CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 12734 // CHECK18-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 12735 // 
CHECK18-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4 12736 // CHECK18-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12737 // CHECK18-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 12738 // CHECK18-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 12739 // CHECK18-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4 12740 // CHECK18-NEXT: br label [[OMP_DISPATCH_COND]] 12741 // CHECK18: omp.dispatch.end: 12742 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 12743 // CHECK18-NEXT: ret void 12744 // 12745 // 12746 // CHECK18-LABEL: define {{[^@]+}}@_Z3bari 12747 // CHECK18-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 12748 // CHECK18-NEXT: entry: 12749 // CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 12750 // CHECK18-NEXT: [[A:%.*]] = alloca i32, align 4 12751 // CHECK18-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 8 12752 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 12753 // CHECK18-NEXT: store i32 0, i32* [[A]], align 4 12754 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 12755 // CHECK18-NEXT: [[CALL:%.*]] = call signext i32 @_Z3fooi(i32 signext [[TMP0]]) 12756 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 12757 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 12758 // CHECK18-NEXT: store i32 [[ADD]], i32* [[A]], align 4 12759 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 12760 // CHECK18-NEXT: [[CALL1:%.*]] = call signext i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 8 dereferenceable(8) [[S]], i32 signext [[TMP2]]) 12761 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 12762 // CHECK18-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 12763 // CHECK18-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 12764 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 12765 // CHECK18-NEXT: [[CALL3:%.*]] = call signext i32 @_ZL7fstatici(i32 signext [[TMP4]]) 12766 // CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 12767 // CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 12768 // CHECK18-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 12769 // CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 12770 // CHECK18-NEXT: [[CALL5:%.*]] = call signext i32 @_Z9ftemplateIiET_i(i32 signext [[TMP6]]) 12771 // CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 12772 // CHECK18-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 12773 // CHECK18-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 12774 // CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 12775 // CHECK18-NEXT: ret i32 [[TMP8]] 12776 // 12777 // 12778 // CHECK18-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 12779 // CHECK18-SAME: (%struct.S1* nonnull align 8 dereferenceable(8) [[THIS:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] comdat align 2 { 12780 // CHECK18-NEXT: entry: 12781 // CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 12782 // CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 12783 // CHECK18-NEXT: [[B:%.*]] = alloca i32, align 4 12784 // CHECK18-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 12785 // CHECK18-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 12786 // CHECK18-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 12787 // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 12788 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 12789 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 12790 // CHECK18-NEXT: 
[[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 12791 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 12792 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 12793 // CHECK18-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 12794 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 12795 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 12796 // CHECK18-NEXT: store i32 [[ADD]], i32* [[B]], align 4 12797 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 12798 // CHECK18-NEXT: [[TMP2:%.*]] = zext i32 [[TMP1]] to i64 12799 // CHECK18-NEXT: [[TMP3:%.*]] = call i8* @llvm.stacksave() 12800 // CHECK18-NEXT: store i8* [[TMP3]], i8** [[SAVED_STACK]], align 8 12801 // CHECK18-NEXT: [[TMP4:%.*]] = mul nuw i64 2, [[TMP2]] 12802 // CHECK18-NEXT: [[VLA:%.*]] = alloca i16, i64 [[TMP4]], align 2 12803 // CHECK18-NEXT: store i64 [[TMP2]], i64* [[__VLA_EXPR0]], align 8 12804 // CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[B]], align 4 12805 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[B_CASTED]] to i32* 12806 // CHECK18-NEXT: store i32 [[TMP5]], i32* [[CONV]], align 4 12807 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[B_CASTED]], align 8 12808 // CHECK18-NEXT: [[TMP7:%.*]] = load i32, i32* [[N_ADDR]], align 4 12809 // CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP7]], 60 12810 // CHECK18-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 12811 // CHECK18: omp_if.then: 12812 // CHECK18-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 12813 // CHECK18-NEXT: [[TMP8:%.*]] = mul nuw i64 2, [[TMP2]] 12814 // CHECK18-NEXT: [[TMP9:%.*]] = mul nuw i64 [[TMP8]], 2 12815 // CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 12816 // CHECK18-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1** 12817 // CHECK18-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 8 12818 // CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 12819 // CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** 12820 // CHECK18-NEXT: store double* [[A]], double** [[TMP13]], align 8 12821 // CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 12822 // CHECK18-NEXT: store i64 8, i64* [[TMP14]], align 8 12823 // CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 12824 // CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 8 12825 // CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 12826 // CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* 12827 // CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 12828 // CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 12829 // CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* 12830 // CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 12831 // CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 12832 // CHECK18-NEXT: store i64 4, i64* [[TMP20]], align 8 12833 // CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 12834 // CHECK18-NEXT: store i8* null, i8** [[TMP21]], align 8 12835 // 
CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 12836 // CHECK18-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* 12837 // CHECK18-NEXT: store i64 2, i64* [[TMP23]], align 8 12838 // CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 12839 // CHECK18-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* 12840 // CHECK18-NEXT: store i64 2, i64* [[TMP25]], align 8 12841 // CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 12842 // CHECK18-NEXT: store i64 8, i64* [[TMP26]], align 8 12843 // CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 12844 // CHECK18-NEXT: store i8* null, i8** [[TMP27]], align 8 12845 // CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 12846 // CHECK18-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* 12847 // CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 12848 // CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 12849 // CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* 12850 // CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 12851 // CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 12852 // CHECK18-NEXT: store i64 8, i64* [[TMP32]], align 8 12853 // CHECK18-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 12854 // CHECK18-NEXT: store i8* null, i8** [[TMP33]], align 8 12855 // CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 12856 // CHECK18-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** 12857 // CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 12858 // CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 12859 // CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** 12860 // CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 12861 // CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 12862 // CHECK18-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 12863 // CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 12864 // CHECK18-NEXT: store i8* null, i8** [[TMP39]], align 8 12865 // CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 12866 // CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 12867 // CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 12868 // CHECK18-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 12869 // CHECK18-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 12870 // CHECK18-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 12871 // CHECK18: omp_offload.failed: 
12872 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] 12873 // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] 12874 // CHECK18: omp_offload.cont: 12875 // CHECK18-NEXT: br label [[OMP_IF_END:%.*]] 12876 // CHECK18: omp_if.else: 12877 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] 12878 // CHECK18-NEXT: br label [[OMP_IF_END]] 12879 // CHECK18: omp_if.end: 12880 // CHECK18-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] 12881 // CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] 12882 // CHECK18-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 12883 // CHECK18-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 12884 // CHECK18-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 12885 // CHECK18-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 12886 // CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] 12887 // CHECK18-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 12888 // CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) 12889 // CHECK18-NEXT: ret i32 [[ADD4]] 12890 // 12891 // 12892 // CHECK18-LABEL: define {{[^@]+}}@_ZL7fstatici 12893 // CHECK18-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] { 12894 // CHECK18-NEXT: entry: 12895 // CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 12896 // CHECK18-NEXT: [[A:%.*]] = alloca i32, align 4 12897 // CHECK18-NEXT: [[AA:%.*]] = alloca i16, align 2 12898 // CHECK18-NEXT: [[AAA:%.*]] = alloca i8, align 1 12899 // CHECK18-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 12900 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 12901 // CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 12902 // CHECK18-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 12903 // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8 12904 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8 12905 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8 12906 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 12907 // CHECK18-NEXT: store i32 0, i32* [[A]], align 4 12908 // CHECK18-NEXT: store i16 0, i16* [[AA]], align 2 12909 // CHECK18-NEXT: store i8 0, i8* [[AAA]], align 1 12910 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 12911 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 12912 // CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4 12913 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 12914 // CHECK18-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 12915 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 12916 // CHECK18-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 12917 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 12918 // CHECK18-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1 12919 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* 12920 // CHECK18-NEXT: store i8 [[TMP4]], i8* [[CONV2]], align 1 12921 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 12922 // CHECK18-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 12923 // CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 12924 // CHECK18-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 12925 // CHECK18: omp_if.then: 12926 // 
CHECK18-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 12927 // CHECK18-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64* 12928 // CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8 12929 // CHECK18-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 12930 // CHECK18-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i64* 12931 // CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP10]], align 8 12932 // CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 12933 // CHECK18-NEXT: store i8* null, i8** [[TMP11]], align 8 12934 // CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 12935 // CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* 12936 // CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8 12937 // CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 12938 // CHECK18-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* 12939 // CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP15]], align 8 12940 // CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 12941 // CHECK18-NEXT: store i8* null, i8** [[TMP16]], align 8 12942 // CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 12943 // CHECK18-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* 12944 // CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP18]], align 8 12945 // CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 12946 // CHECK18-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* 12947 // CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP20]], align 8 12948 // CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 12949 // CHECK18-NEXT: store i8* null, i8** [[TMP21]], align 8 12950 // CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 12951 // CHECK18-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]** 12952 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 8 12953 // CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 12954 // CHECK18-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]** 12955 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 8 12956 // CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 12957 // CHECK18-NEXT: store i8* null, i8** [[TMP26]], align 8 12958 // CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 12959 // CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 12960 // CHECK18-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 12961 // CHECK18-NEXT: [[TMP30:%.*]] = icmp ne i32 
[[TMP29]], 0 12962 // CHECK18-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 12963 // CHECK18: omp_offload.failed: 12964 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 12965 // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] 12966 // CHECK18: omp_offload.cont: 12967 // CHECK18-NEXT: br label [[OMP_IF_END:%.*]] 12968 // CHECK18: omp_if.else: 12969 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 12970 // CHECK18-NEXT: br label [[OMP_IF_END]] 12971 // CHECK18: omp_if.end: 12972 // CHECK18-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4 12973 // CHECK18-NEXT: ret i32 [[TMP31]] 12974 // 12975 // 12976 // CHECK18-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 12977 // CHECK18-SAME: (i32 signext [[N:%.*]]) #[[ATTR0]] comdat { 12978 // CHECK18-NEXT: entry: 12979 // CHECK18-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 12980 // CHECK18-NEXT: [[A:%.*]] = alloca i32, align 4 12981 // CHECK18-NEXT: [[AA:%.*]] = alloca i16, align 2 12982 // CHECK18-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 12983 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 12984 // CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 12985 // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 12986 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 12987 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 12988 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 12989 // CHECK18-NEXT: store i32 0, i32* [[A]], align 4 12990 // CHECK18-NEXT: store i16 0, i16* [[AA]], align 2 12991 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 12992 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_CASTED]] to i32* 12993 // CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4 12994 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 12995 // CHECK18-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 12996 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 12997 // CHECK18-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 12998 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 12999 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 13000 // CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 13001 // CHECK18-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 13002 // CHECK18: omp_if.then: 13003 // CHECK18-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 13004 // CHECK18-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64* 13005 // CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP6]], align 8 13006 // CHECK18-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 13007 // CHECK18-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i64* 13008 // CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP8]], align 8 13009 // CHECK18-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 13010 // CHECK18-NEXT: store i8* null, i8** [[TMP9]], align 8 13011 // CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 13012 // CHECK18-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64* 13013 // CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP11]], 
align 8 13014 // CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 13015 // CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* 13016 // CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP13]], align 8 13017 // CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 13018 // CHECK18-NEXT: store i8* null, i8** [[TMP14]], align 8 13019 // CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 13020 // CHECK18-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]** 13021 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 8 13022 // CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 13023 // CHECK18-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]** 13024 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 8 13025 // CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 13026 // CHECK18-NEXT: store i8* null, i8** [[TMP19]], align 8 13027 // CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 13028 // CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 13029 // CHECK18-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 13030 // CHECK18-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 13031 // CHECK18-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 13032 // CHECK18: omp_offload.failed: 13033 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 13034 // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] 13035 // CHECK18: omp_offload.cont: 13036 // CHECK18-NEXT: br label [[OMP_IF_END:%.*]] 13037 // CHECK18: omp_if.else: 13038 // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i64 [[TMP1]], i64 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 13039 // CHECK18-NEXT: br label [[OMP_IF_END]] 13040 // CHECK18: omp_if.end: 13041 // CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4 13042 // CHECK18-NEXT: ret i32 [[TMP24]] 13043 // 13044 // 13045 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242 13046 // CHECK18-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { 13047 // CHECK18-NEXT: entry: 13048 // CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 13049 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 13050 // CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 13051 // CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 13052 // CHECK18-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 13053 // CHECK18-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 13054 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 13055 // CHECK18-NEXT: 
store i64 [[B]], i64* [[B_ADDR]], align 8 13056 // CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 13057 // CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 13058 // CHECK18-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 13059 // CHECK18-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 13060 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 13061 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 13062 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 13063 // CHECK18-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 13064 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8 13065 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* 13066 // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 13067 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 13068 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) 13069 // CHECK18-NEXT: ret void 13070 // 13071 // 13072 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..9 13073 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { 13074 // CHECK18-NEXT: entry: 13075 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 13076 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 13077 // CHECK18-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 13078 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 13079 // CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 13080 // CHECK18-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 13081 // CHECK18-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 13082 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 13083 // CHECK18-NEXT: [[TMP:%.*]] = alloca i64, align 8 13084 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 13085 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 13086 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 13087 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 13088 // CHECK18-NEXT: [[IT:%.*]] = alloca i64, align 8 13089 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 13090 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 13091 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 13092 // CHECK18-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 13093 // CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 13094 // CHECK18-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 13095 // CHECK18-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 13096 // CHECK18-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 13097 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 13098 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 13099 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 13100 // CHECK18-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 13101 // CHECK18-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 13102 
// CHECK18-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 13103 // CHECK18-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 13104 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 13105 // CHECK18-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 13106 // CHECK18-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 13107 // CHECK18-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 13108 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 13109 // CHECK18-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 13110 // CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 13111 // CHECK18: cond.true: 13112 // CHECK18-NEXT: br label [[COND_END:%.*]] 13113 // CHECK18: cond.false: 13114 // CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 13115 // CHECK18-NEXT: br label [[COND_END]] 13116 // CHECK18: cond.end: 13117 // CHECK18-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 13118 // CHECK18-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 13119 // CHECK18-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 13120 // CHECK18-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 13121 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13122 // CHECK18: omp.inner.for.cond: 13123 // CHECK18-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 13124 // CHECK18-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 13125 // CHECK18-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 13126 // CHECK18-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13127 // CHECK18: omp.inner.for.body: 13128 // CHECK18-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 13129 // CHECK18-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 13130 // CHECK18-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 13131 // CHECK18-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 13132 // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8 13133 // CHECK18-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double 13134 // CHECK18-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 13135 // CHECK18-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 13136 // CHECK18-NEXT: store double [[ADD]], double* [[A]], align 8 13137 // CHECK18-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 13138 // CHECK18-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8 13139 // CHECK18-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 13140 // CHECK18-NEXT: store double [[INC]], double* [[A5]], align 8 13141 // CHECK18-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 13142 // CHECK18-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] 13143 // CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]] 13144 // CHECK18-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 13145 // CHECK18-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2 13146 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13147 // CHECK18: omp.body.continue: 13148 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13149 // CHECK18: omp.inner.for.inc: 13150 // CHECK18-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 13151 // CHECK18-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1 13152 // 
CHECK18-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8 13153 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]] 13154 // CHECK18: omp.inner.for.end: 13155 // CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 13156 // CHECK18: omp.loop.exit: 13157 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 13158 // CHECK18-NEXT: ret void 13159 // 13160 // 13161 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224 13162 // CHECK18-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 13163 // CHECK18-NEXT: entry: 13164 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 13165 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 13166 // CHECK18-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 13167 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 13168 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 13169 // CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 13170 // CHECK18-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 13171 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 13172 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 13173 // CHECK18-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 13174 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 13175 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 13176 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 13177 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 13178 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 13179 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 13180 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 13181 // CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4 13182 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 13183 // CHECK18-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 13184 // CHECK18-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 13185 // CHECK18-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2 13186 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 13187 // CHECK18-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8 13188 // CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* 13189 // CHECK18-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 13190 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 13191 // CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) 13192 // CHECK18-NEXT: ret void 13193 // 13194 // 13195 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 13196 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 13197 // CHECK18-NEXT: entry: 13198 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 13199 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 13200 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 13201 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 13202 // CHECK18-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 13203 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 13204 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13205 // CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4 13206 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 13207 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 13208 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 13209 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 13210 // CHECK18-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 13211 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 13212 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 13213 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 13214 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 13215 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 13216 // CHECK18-NEXT: ret void 13217 // 13218 // 13219 // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207 13220 // CHECK18-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 13221 // CHECK18-NEXT: entry: 13222 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 13223 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 13224 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 13225 // CHECK18-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 13226 // CHECK18-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 13227 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 13228 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 13229 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 13230 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 13231 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 13232 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 13233 // CHECK18-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 13234 // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 13235 // CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4 13236 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 13237 // CHECK18-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 13238 // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 13239 // CHECK18-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 13240 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 13241 // 
CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) 13242 // CHECK18-NEXT: ret void 13243 // 13244 // 13245 // CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14 13246 // CHECK18-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 13247 // CHECK18-NEXT: entry: 13248 // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 13249 // CHECK18-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 13250 // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 13251 // CHECK18-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 13252 // CHECK18-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 13253 // CHECK18-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 13254 // CHECK18-NEXT: [[TMP:%.*]] = alloca i64, align 8 13255 // CHECK18-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 13256 // CHECK18-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 13257 // CHECK18-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 13258 // CHECK18-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 13259 // CHECK18-NEXT: [[I:%.*]] = alloca i64, align 8 13260 // CHECK18-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 13261 // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 13262 // CHECK18-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 13263 // CHECK18-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 13264 // CHECK18-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 13265 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 13266 // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 13267 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 13268 // CHECK18-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 13269 // CHECK18-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 13270 // CHECK18-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 13271 // CHECK18-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 13272 // CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 13273 // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 13274 // CHECK18-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 13275 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 13276 // CHECK18-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 13277 // CHECK18-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 13278 // CHECK18: cond.true: 13279 // CHECK18-NEXT: br label [[COND_END:%.*]] 13280 // CHECK18: cond.false: 13281 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 13282 // CHECK18-NEXT: br label [[COND_END]] 13283 // CHECK18: cond.end: 13284 // CHECK18-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 13285 // CHECK18-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 13286 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 13287 // CHECK18-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 13288 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13289 // 
CHECK18: omp.inner.for.cond: 13290 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 13291 // CHECK18-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 13292 // CHECK18-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 13293 // CHECK18-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13294 // CHECK18: omp.inner.for.body: 13295 // CHECK18-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 13296 // CHECK18-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 13297 // CHECK18-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 13298 // CHECK18-NEXT: store i64 [[ADD]], i64* [[I]], align 8 13299 // CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8 13300 // CHECK18-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 13301 // CHECK18-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8 13302 // CHECK18-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8 13303 // CHECK18-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32 13304 // CHECK18-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 13305 // CHECK18-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 13306 // CHECK18-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8 13307 // CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2 13308 // CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 13309 // CHECK18-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1 13310 // CHECK18-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4 13311 // CHECK18-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13312 // CHECK18: omp.body.continue: 13313 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13314 // CHECK18: omp.inner.for.inc: 13315 // CHECK18-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 13316 // CHECK18-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1 13317 // CHECK18-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8 13318 // CHECK18-NEXT: br label [[OMP_INNER_FOR_COND]] 13319 // CHECK18: omp.inner.for.end: 13320 // CHECK18-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 13321 // CHECK18: omp.loop.exit: 13322 // CHECK18-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 13323 // CHECK18-NEXT: ret void 13324 // 13325 // 13326 // CHECK18-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 13327 // CHECK18-SAME: () #[[ATTR6]] { 13328 // CHECK18-NEXT: entry: 13329 // CHECK18-NEXT: call void @__tgt_register_requires(i64 1) 13330 // CHECK18-NEXT: ret void 13331 // 13332 // 13333 // CHECK19-LABEL: define {{[^@]+}}@_Z7get_valv 13334 // CHECK19-SAME: () #[[ATTR0:[0-9]+]] { 13335 // CHECK19-NEXT: entry: 13336 // CHECK19-NEXT: ret i64 0 13337 // 13338 // 13339 // CHECK19-LABEL: define {{[^@]+}}@_Z3fooi 13340 // CHECK19-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 13341 // CHECK19-NEXT: entry: 13342 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 13343 // CHECK19-NEXT: [[A:%.*]] = alloca i32, align 4 13344 // CHECK19-NEXT: [[AA:%.*]] = alloca i16, align 2 13345 // CHECK19-NEXT: [[B:%.*]] = alloca [10 x float], align 4 13346 // CHECK19-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 13347 // CHECK19-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 13348 // CHECK19-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 13349 // CHECK19-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 13350 // CHECK19-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 13351 // CHECK19-NEXT: [[K:%.*]] = alloca i64, align 8 13352 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 13353 // CHECK19-NEXT: [[LIN:%.*]] = alloca i32, align 4 13354 
// CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 13355 // CHECK19-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 13356 // CHECK19-NEXT: [[A_CASTED2:%.*]] = alloca i32, align 4 13357 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 13358 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 13359 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 13360 // CHECK19-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4 13361 // CHECK19-NEXT: [[A_CASTED3:%.*]] = alloca i32, align 4 13362 // CHECK19-NEXT: [[AA_CASTED4:%.*]] = alloca i32, align 4 13363 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4 13364 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4 13365 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4 13366 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 13367 // CHECK19-NEXT: [[A_CASTED11:%.*]] = alloca i32, align 4 13368 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 13369 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 13370 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 13371 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 13372 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 13373 // CHECK19-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 13374 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 13375 // CHECK19-NEXT: store i32 0, i32* [[A]], align 4 13376 // CHECK19-NEXT: store i16 0, i16* [[AA]], align 2 13377 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 13378 // CHECK19-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 13379 // CHECK19-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 13380 // CHECK19-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 13381 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 13382 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 13383 // CHECK19-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] 13384 // CHECK19-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 13385 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4 13386 // CHECK19-NEXT: [[TMP5:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0) 13387 // CHECK19-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0 13388 // CHECK19-NEXT: br i1 [[TMP6]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 13389 // CHECK19: omp_offload.failed: 13390 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103() #[[ATTR4:[0-9]+]] 13391 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] 13392 // CHECK19: omp_offload.cont: 13393 // CHECK19-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 13394 // CHECK19-NEXT: store i64 [[CALL]], i64* [[K]], align 8 13395 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 13396 // CHECK19-NEXT: store i32 [[TMP7]], i32* [[A_CASTED]], align 4 13397 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_CASTED]], align 4 13398 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i32 [[TMP8]], i64* [[K]]) #[[ATTR4]] 13399 // CHECK19-NEXT: store i32 12, i32* [[LIN]], align 4 13400 // CHECK19-NEXT: [[TMP9:%.*]] = load 
i16, i16* [[AA]], align 2 13401 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 13402 // CHECK19-NEXT: store i16 [[TMP9]], i16* [[CONV]], align 2 13403 // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[AA_CASTED]], align 4 13404 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[LIN]], align 4 13405 // CHECK19-NEXT: store i32 [[TMP11]], i32* [[LIN_CASTED]], align 4 13406 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[LIN_CASTED]], align 4 13407 // CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[A]], align 4 13408 // CHECK19-NEXT: store i32 [[TMP13]], i32* [[A_CASTED2]], align 4 13409 // CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[A_CASTED2]], align 4 13410 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 13411 // CHECK19-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* 13412 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[TMP16]], align 4 13413 // CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 13414 // CHECK19-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* 13415 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[TMP18]], align 4 13416 // CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 13417 // CHECK19-NEXT: store i8* null, i8** [[TMP19]], align 4 13418 // CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 13419 // CHECK19-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* 13420 // CHECK19-NEXT: store i32 [[TMP12]], i32* [[TMP21]], align 4 13421 // CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 13422 // CHECK19-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* 13423 // CHECK19-NEXT: store i32 [[TMP12]], i32* [[TMP23]], align 4 13424 // CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 13425 // CHECK19-NEXT: store i8* null, i8** [[TMP24]], align 4 13426 // CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 13427 // CHECK19-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* 13428 // CHECK19-NEXT: store i32 [[TMP14]], i32* [[TMP26]], align 4 13429 // CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 13430 // CHECK19-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* 13431 // CHECK19-NEXT: store i32 [[TMP14]], i32* [[TMP28]], align 4 13432 // CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 13433 // CHECK19-NEXT: store i8* null, i8** [[TMP29]], align 4 13434 // CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 13435 // CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 13436 // CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0 13437 // CHECK19-NEXT: [[TMP33:%.*]] = load i16, i16* [[AA]], align 2 13438 // CHECK19-NEXT: store i16 [[TMP33]], i16* [[TMP32]], align 4 13439 // CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1 13440 // CHECK19-NEXT: [[TMP35:%.*]] = load i32, i32* [[LIN]], align 4 13441 // CHECK19-NEXT: store i32 [[TMP35]], i32* [[TMP34]], align 4 13442 
// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2 13443 // CHECK19-NEXT: [[TMP37:%.*]] = load i32, i32* [[A]], align 4 13444 // CHECK19-NEXT: store i32 [[TMP37]], i32* [[TMP36]], align 4 13445 // CHECK19-NEXT: [[TMP38:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1) 13446 // CHECK19-NEXT: [[TMP39:%.*]] = bitcast i8* [[TMP38]] to %struct.kmp_task_t_with_privates* 13447 // CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 0 13448 // CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP40]], i32 0, i32 0 13449 // CHECK19-NEXT: [[TMP42:%.*]] = load i8*, i8** [[TMP41]], align 4 13450 // CHECK19-NEXT: [[TMP43:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8* 13451 // CHECK19-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i32 12, i1 false) 13452 // CHECK19-NEXT: [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 1 13453 // CHECK19-NEXT: [[TMP45:%.*]] = bitcast i8* [[TMP42]] to %struct.anon* 13454 // CHECK19-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 0 13455 // CHECK19-NEXT: [[TMP47:%.*]] = bitcast [3 x i64]* [[TMP46]] to i8* 13456 // CHECK19-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP47]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes to i8*), i32 24, i1 false) 13457 // CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 1 13458 // CHECK19-NEXT: [[TMP49:%.*]] = bitcast [3 x i8*]* [[TMP48]] to i8* 13459 // CHECK19-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP30]] to i8* 13460 // CHECK19-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP49]], i8* align 4 [[TMP50]], i32 12, i1 false) 13461 // CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 2 13462 // CHECK19-NEXT: [[TMP52:%.*]] = bitcast [3 x i8*]* [[TMP51]] to i8* 13463 // CHECK19-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP31]] to i8* 13464 // CHECK19-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP52]], i8* align 4 [[TMP53]], i32 12, i1 false) 13465 // CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 3 13466 // CHECK19-NEXT: [[TMP55:%.*]] = load i16, i16* [[AA]], align 2 13467 // CHECK19-NEXT: store i16 [[TMP55]], i16* [[TMP54]], align 4 13468 // CHECK19-NEXT: [[TMP56:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP38]]) 13469 // CHECK19-NEXT: [[TMP57:%.*]] = load i32, i32* [[A]], align 4 13470 // CHECK19-NEXT: store i32 [[TMP57]], i32* [[A_CASTED3]], align 4 13471 // CHECK19-NEXT: [[TMP58:%.*]] = load i32, i32* [[A_CASTED3]], align 4 13472 // CHECK19-NEXT: [[TMP59:%.*]] = load i16, i16* [[AA]], align 2 13473 // CHECK19-NEXT: [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16* 13474 // CHECK19-NEXT: store i16 [[TMP59]], i16* [[CONV5]], align 2 13475 // CHECK19-NEXT: [[TMP60:%.*]] = load i32, i32* [[AA_CASTED4]], align 4 13476 // CHECK19-NEXT: 
[[TMP61:%.*]] = load i32, i32* [[N_ADDR]], align 4 13477 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP61]], 10 13478 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 13479 // CHECK19: omp_if.then: 13480 // CHECK19-NEXT: [[TMP62:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0 13481 // CHECK19-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* 13482 // CHECK19-NEXT: store i32 [[TMP58]], i32* [[TMP63]], align 4 13483 // CHECK19-NEXT: [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0 13484 // CHECK19-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* 13485 // CHECK19-NEXT: store i32 [[TMP58]], i32* [[TMP65]], align 4 13486 // CHECK19-NEXT: [[TMP66:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0 13487 // CHECK19-NEXT: store i8* null, i8** [[TMP66]], align 4 13488 // CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1 13489 // CHECK19-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* 13490 // CHECK19-NEXT: store i32 [[TMP60]], i32* [[TMP68]], align 4 13491 // CHECK19-NEXT: [[TMP69:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1 13492 // CHECK19-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* 13493 // CHECK19-NEXT: store i32 [[TMP60]], i32* [[TMP70]], align 4 13494 // CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1 13495 // CHECK19-NEXT: store i8* null, i8** [[TMP71]], align 4 13496 // CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0 13497 // CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0 13498 // CHECK19-NEXT: [[TMP74:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146.region_id, i32 2, i8** [[TMP72]], i8** [[TMP73]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 13499 // CHECK19-NEXT: [[TMP75:%.*]] = icmp ne i32 [[TMP74]], 0 13500 // CHECK19-NEXT: br i1 [[TMP75]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]] 13501 // CHECK19: omp_offload.failed9: 13502 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i32 [[TMP58]], i32 [[TMP60]]) #[[ATTR4]] 13503 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT10]] 13504 // CHECK19: omp_offload.cont10: 13505 // CHECK19-NEXT: br label [[OMP_IF_END:%.*]] 13506 // CHECK19: omp_if.else: 13507 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i32 [[TMP58]], i32 [[TMP60]]) #[[ATTR4]] 13508 // CHECK19-NEXT: br label [[OMP_IF_END]] 13509 // CHECK19: omp_if.end: 13510 // CHECK19-NEXT: [[TMP76:%.*]] = load i32, i32* [[A]], align 4 13511 // CHECK19-NEXT: store i32 [[TMP76]], i32* [[DOTCAPTURE_EXPR_]], align 4 13512 // CHECK19-NEXT: [[TMP77:%.*]] = load i32, i32* [[A]], align 4 13513 // CHECK19-NEXT: store i32 [[TMP77]], i32* [[A_CASTED11]], align 4 13514 // CHECK19-NEXT: [[TMP78:%.*]] = load i32, i32* [[A_CASTED11]], align 4 13515 // CHECK19-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 13516 // CHECK19-NEXT: store i32 [[TMP79]], i32* 
[[DOTCAPTURE_EXPR__CASTED]], align 4 13517 // CHECK19-NEXT: [[TMP80:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 13518 // CHECK19-NEXT: [[TMP81:%.*]] = load i32, i32* [[N_ADDR]], align 4 13519 // CHECK19-NEXT: [[CMP12:%.*]] = icmp sgt i32 [[TMP81]], 20 13520 // CHECK19-NEXT: br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]] 13521 // CHECK19: omp_if.then13: 13522 // CHECK19-NEXT: [[TMP82:%.*]] = mul nuw i32 [[TMP1]], 4 13523 // CHECK19-NEXT: [[TMP83:%.*]] = sext i32 [[TMP82]] to i64 13524 // CHECK19-NEXT: [[TMP84:%.*]] = mul nuw i32 5, [[TMP3]] 13525 // CHECK19-NEXT: [[TMP85:%.*]] = mul nuw i32 [[TMP84]], 8 13526 // CHECK19-NEXT: [[TMP86:%.*]] = sext i32 [[TMP85]] to i64 13527 // CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 13528 // CHECK19-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32* 13529 // CHECK19-NEXT: store i32 [[TMP78]], i32* [[TMP88]], align 4 13530 // CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 13531 // CHECK19-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* 13532 // CHECK19-NEXT: store i32 [[TMP78]], i32* [[TMP90]], align 4 13533 // CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 13534 // CHECK19-NEXT: store i64 4, i64* [[TMP91]], align 4 13535 // CHECK19-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 13536 // CHECK19-NEXT: store i8* null, i8** [[TMP92]], align 4 13537 // CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 13538 // CHECK19-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to [10 x float]** 13539 // CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP94]], align 4 13540 // CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 13541 // CHECK19-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]** 13542 // CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 4 13543 // CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 13544 // CHECK19-NEXT: store i64 40, i64* [[TMP97]], align 4 13545 // CHECK19-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 13546 // CHECK19-NEXT: store i8* null, i8** [[TMP98]], align 4 13547 // CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 13548 // CHECK19-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* 13549 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP100]], align 4 13550 // CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 13551 // CHECK19-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* 13552 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP102]], align 4 13553 // CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 13554 // CHECK19-NEXT: store i64 4, i64* [[TMP103]], align 4 13555 // CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 13556 // CHECK19-NEXT: store i8* null, i8** [[TMP104]], align 4 13557 // CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
// CHECK19-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to float**
// CHECK19-NEXT: store float* [[VLA]], float** [[TMP106]], align 4
// CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3
// CHECK19-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float**
// CHECK19-NEXT: store float* [[VLA]], float** [[TMP108]], align 4
// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK19-NEXT: store i64 [[TMP83]], i64* [[TMP109]], align 4
// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3
// CHECK19-NEXT: store i8* null, i8** [[TMP110]], align 4
// CHECK19-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4
// CHECK19-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]**
// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 4
// CHECK19-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4
// CHECK19-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]**
// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 4
// CHECK19-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK19-NEXT: store i64 400, i64* [[TMP115]], align 4
// CHECK19-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4
// CHECK19-NEXT: store i8* null, i8** [[TMP116]], align 4
// CHECK19-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5
// CHECK19-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32*
// CHECK19-NEXT: store i32 5, i32* [[TMP118]], align 4
// CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5
// CHECK19-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32*
// CHECK19-NEXT: store i32 5, i32* [[TMP120]], align 4
// CHECK19-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK19-NEXT: store i64 4, i64* [[TMP121]], align 4
// CHECK19-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5
// CHECK19-NEXT: store i8* null, i8** [[TMP122]], align 4
// CHECK19-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6
// CHECK19-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32*
// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP124]], align 4
// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6
// CHECK19-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32*
// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4
// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK19-NEXT: store i64 4, i64* [[TMP127]], align 4
// CHECK19-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6
// CHECK19-NEXT: store i8* null, i8** [[TMP128]], align 4
// CHECK19-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7
// CHECK19-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to double**
// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP130]], align 4
// CHECK19-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7
// CHECK19-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double**
// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP132]], align 4
// CHECK19-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK19-NEXT: store i64 [[TMP86]], i64* [[TMP133]], align 4
// CHECK19-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7
// CHECK19-NEXT: store i8* null, i8** [[TMP134]], align 4
// CHECK19-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8
// CHECK19-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT**
// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4
// CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8
// CHECK19-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT**
// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4
// CHECK19-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK19-NEXT: store i64 12, i64* [[TMP139]], align 4
// CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8
// CHECK19-NEXT: store i8* null, i8** [[TMP140]], align 4
// CHECK19-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9
// CHECK19-NEXT: [[TMP142:%.*]] = bitcast i8** [[TMP141]] to i32*
// CHECK19-NEXT: store i32 [[TMP80]], i32* [[TMP142]], align 4
// CHECK19-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9
// CHECK19-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32*
// CHECK19-NEXT: store i32 [[TMP80]], i32* [[TMP144]], align 4
// CHECK19-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
// CHECK19-NEXT: store i64 4, i64* [[TMP145]], align 4
// CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9
// CHECK19-NEXT: store i8* null, i8** [[TMP146]], align 4
// CHECK19-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK19-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK19-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP147]], i8** [[TMP148]], i64* [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK19-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0
// CHECK19-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK19: omp_offload.failed17:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT18]]
// CHECK19: omp_offload.cont18:
// CHECK19-NEXT: br label [[OMP_IF_END20:%.*]]
// CHECK19: omp_if.else19:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]]
// CHECK19-NEXT: br label [[OMP_IF_END20]]
// CHECK19: omp_if.end20:
// CHECK19-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4
// CHECK19-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP153]])
// CHECK19-NEXT: ret i32 [[TMP152]]
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103
// CHECK19-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK19-NEXT: ret void
//
//
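// The checks that follow cover @.omp_outlined., the body outlined for the
// l103 region above: a statically scheduled worksharing loop
// (__kmpc_for_static_init_4 with schedule 34) whose body hits both a cancel
// and a cancellation point, so cancellation branches to a dedicated
// cancel.exit block that still runs __kmpc_for_static_fini before rejoining
// cancel.cont.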
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK19-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK19-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK19-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK19: .cancel.exit:
// CHECK19-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK19: .cancel.continue:
// CHECK19-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK19-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK19-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK19: .cancel.exit2:
// CHECK19-NEXT: br label [[CANCEL_EXIT]]
// CHECK19: .cancel.continue3:
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK19-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK19-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK19: cancel.cont:
// CHECK19-NEXT: ret void
// CHECK19: cancel.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK19-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK19-SAME: (i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]])
// CHECK19-NEXT: ret void
//
//
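// @.omp_outlined..1 below runs its loop under the dispatch protocol
// (__kmpc_dispatch_init_4/__kmpc_dispatch_next_4); the constant 1073741859
// appears to encode dynamic-chunked scheduling plus the monotonic modifier
// bit. The linear i64 k is privatized into K1, recomputed each iteration
// from .linear.start, and the final value is written back in .omp.linear.pu
// only by the thread for which .omp.is_last is set.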
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[K1:%.*]] = alloca i64, align 8
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
// CHECK19-NEXT: store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK19-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK19-NEXT: call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1073741859, i32 0, i32 8, i32 1, i32 1)
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK19: omp.dispatch.cond:
// CHECK19-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK19-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0
// CHECK19-NEXT: br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK19: omp.dispatch.body:
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
// CHECK19-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK19-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK19-NEXT: store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !13
// CHECK19-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !13
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK19-NEXT: [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3
// CHECK19-NEXT: [[CONV:%.*]] = sext i32 [[MUL2]] to i64
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]]
// CHECK19-NEXT: store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !13
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !13
// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK19-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !13
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK19-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK19: omp.dispatch.inc:
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK19: omp.dispatch.end:
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK19-NEXT: br i1 [[TMP14]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK19: .omp.linear.pu:
// CHECK19-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8
// CHECK19-NEXT: [[ADD5:%.*]] = add nsw i64 [[TMP15]], 27
// CHECK19-NEXT: store i64 [[ADD5]], i64* [[TMP0]], align 8
// CHECK19-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK19: .omp.linear.pu.done:
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138
// CHECK19-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK19-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR3]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK19-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK19-NEXT: [[LIN2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A3:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK19-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK19-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK19-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK19-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK19-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK19-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK19-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK19-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK19-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK19-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK19-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK19-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK19-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK19-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK19-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK19-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK19-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK19-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK19-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK19-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK19-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK19-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK19-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK19-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK19-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK19-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK19-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK19-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4
// CHECK19-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK19-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK19-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK19-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK19-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK19-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK19-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK19: .omp.linear.pu:
// CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK19-NEXT: [[CONV16:%.*]] = sext i32 [[TMP20]] to i64
// CHECK19-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK19-NEXT: [[MUL17:%.*]] = mul i64 4, [[TMP21]]
// CHECK19-NEXT: [[ADD18:%.*]] = add i64 [[CONV16]], [[MUL17]]
// CHECK19-NEXT: [[CONV19:%.*]] = trunc i64 [[ADD18]] to i32
// CHECK19-NEXT: store i32 [[CONV19]], i32* [[LIN_ADDR]], align 4
// CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK19-NEXT: [[CONV20:%.*]] = sext i32 [[TMP22]] to i64
// CHECK19-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK19-NEXT: [[MUL21:%.*]] = mul i64 4, [[TMP23]]
// CHECK19-NEXT: [[ADD22:%.*]] = add i64 [[CONV20]], [[MUL21]]
// CHECK19-NEXT: [[CONV23:%.*]] = trunc i64 [[ADD22]] to i32
// CHECK19-NEXT: store i32 [[CONV23]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK19: .omp.linear.pu.done:
// CHECK19-NEXT: ret void
//
//
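// @.omp_task_privates_map. below hands back pointers into
// %struct..kmp_privates.t for the task's private copies: the [3 x i64]
// sizes array, the two [3 x i8*] offload pointer arrays, and the
// firstprivate i16, in the order the task entry expects them.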
// CHECK19-LABEL: define {{[^@]+}}@.omp_task_privates_map.
// CHECK19-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i16** noalias [[TMP1:%.*]], [3 x i8*]** noalias [[TMP2:%.*]], [3 x i8*]** noalias [[TMP3:%.*]], [3 x i64]** noalias [[TMP4:%.*]]) #[[ATTR6:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 4
// CHECK19-NEXT: [[DOTADDR1:%.*]] = alloca i16**, align 4
// CHECK19-NEXT: [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 4
// CHECK19-NEXT: [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 4
// CHECK19-NEXT: [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 4
// CHECK19-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 4
// CHECK19-NEXT: store i16** [[TMP1]], i16*** [[DOTADDR1]], align 4
// CHECK19-NEXT: store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 4
// CHECK19-NEXT: store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 4
// CHECK19-NEXT: store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
// CHECK19-NEXT: [[TMP7:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 4
// CHECK19-NEXT: store [3 x i64]* [[TMP6]], [3 x i64]** [[TMP7]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
// CHECK19-NEXT: [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 4
// CHECK19-NEXT: store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
// CHECK19-NEXT: [[TMP11:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 4
// CHECK19-NEXT: store [3 x i8*]* [[TMP10]], [3 x i8*]** [[TMP11]], align 4
// CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
// CHECK19-NEXT: [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 4
// CHECK19-NEXT: store i16* [[TMP12]], i16** [[TMP13]], align 4
// CHECK19-NEXT: ret void
//
//
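// @.omp_task_entry. below is the task proxy: it unpacks
// kmp_task_t_with_privates, invokes the privates map through a bitcast
// function pointer to recover the firstprivate offload arrays, then launches
// the l138 region with __tgt_target_teams_nowait_mapper; on a nonzero return
// it falls back to the host version of the region.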
// CHECK19-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK19-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK19-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK19-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK19-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK19-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 4
// CHECK19-NEXT: [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 4
// CHECK19-NEXT: [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 4
// CHECK19-NEXT: [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 4
// CHECK19-NEXT: [[AA_CASTED_I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[LIN_CASTED_I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_CASTED_I:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK19-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK19-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK19-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK19-NEXT: [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
// CHECK19-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
// CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
// CHECK19-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !25
// CHECK19-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25
// CHECK19-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25
// CHECK19-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !25
// CHECK19-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
// CHECK19-NEXT: call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
// CHECK19-NEXT: [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i32 0, i32 0
// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i32 0, i32 0
// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i32 0, i32 0
// CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK19-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK19-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK19-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]]
// CHECK19: omp_offload.failed.i:
// CHECK19-NEXT: [[TMP27:%.*]] = load i16, i16* [[TMP16]], align 2
// CHECK19-NEXT: [[CONV_I:%.*]] = bitcast i32* [[AA_CASTED_I]] to i16*
// CHECK19-NEXT: store i16 [[TMP27]], i16* [[CONV_I]], align 2, !noalias !25
// CHECK19-NEXT: [[TMP28:%.*]] = load i32, i32* [[AA_CASTED_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK19-NEXT: store i32 [[TMP29]], i32* [[LIN_CASTED_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP30:%.*]] = load i32, i32* [[LIN_CASTED_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP31:%.*]] = load i32, i32* [[TMP24]], align 4
// CHECK19-NEXT: store i32 [[TMP31]], i32* [[A_CASTED_I]], align 4, !noalias !25
// CHECK19-NEXT: [[TMP32:%.*]] = load i32, i32* [[A_CASTED_I]], align 4, !noalias !25
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138(i32 [[TMP28]], i32 [[TMP30]], i32 [[TMP32]]) #[[ATTR4]]
// CHECK19-NEXT: br label [[DOTOMP_OUTLINED__3_EXIT]]
// CHECK19: .omp_outlined..3.exit:
// CHECK19-NEXT: ret i32 0
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146
// CHECK19-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK19-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR3]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK19-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK19-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK19-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK19-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK19-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK19-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK19-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4
// CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK19: omp.body.continue:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK19: omp.inner.for.inc:
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK19-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK19: omp.inner.for.end:
// CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK19: omp.loop.exit:
// CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK19-NEXT: ret void
//
//
// CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170
// CHECK19-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK19-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK19-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
// CHECK19-NEXT: ret void
//
//
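// @.omp_outlined..7 below is checked as a statically chunked loop
// (__kmpc_for_static_init_4 with schedule 33), where the chunk size is the
// captured [[DOTCAPTURE_EXPR_]] value; after each chunk the code re-enters
// omp.dispatch.cond and bumps the bounds by the stride until the upper
// bound is exhausted.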
// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK19-NEXT: entry:
// CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK19-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK19-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK19-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK19-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK19-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK19-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK19-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK19-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK19-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK19-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK19-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK19-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK19-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK19: omp.dispatch.cond:
// CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK19: cond.true:
// CHECK19-NEXT: br label [[COND_END:%.*]]
// CHECK19: cond.false:
// CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: br label [[COND_END]]
// CHECK19: cond.end:
// CHECK19-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK19-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK19-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK19-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK19: omp.dispatch.body:
// CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK19: omp.inner.for.cond:
// CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK19-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK19-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK19: omp.inner.for.body:
// CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK19-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK19-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8
// CHECK19-NEXT: store i8 [[CONV]], i8* [[IT]], align 1
// CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK19-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK19-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4
CHECK19-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4 14307 // CHECK19-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double 14308 // CHECK19-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 14309 // CHECK19-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float 14310 // CHECK19-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4 14311 // CHECK19-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 14312 // CHECK19-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4 14313 // CHECK19-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double 14314 // CHECK19-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 14315 // CHECK19-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float 14316 // CHECK19-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4 14317 // CHECK19-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 14318 // CHECK19-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 14319 // CHECK19-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8 14320 // CHECK19-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 14321 // CHECK19-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8 14322 // CHECK19-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] 14323 // CHECK19-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] 14324 // CHECK19-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 14325 // CHECK19-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8 14326 // CHECK19-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 14327 // CHECK19-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8 14328 // CHECK19-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 14329 // CHECK19-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4 14330 // CHECK19-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 14331 // CHECK19-NEXT: store i64 [[ADD20]], i64* [[X]], align 4 14332 // CHECK19-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 14333 // CHECK19-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4 14334 // CHECK19-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 14335 // CHECK19-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 14336 // CHECK19-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 14337 // CHECK19-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4 14338 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14339 // CHECK19: omp.body.continue: 14340 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14341 // CHECK19: omp.inner.for.inc: 14342 // CHECK19-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 14343 // CHECK19-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 14344 // CHECK19-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4 14345 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] 14346 // CHECK19: omp.inner.for.end: 14347 // CHECK19-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 14348 // CHECK19: omp.dispatch.inc: 14349 // CHECK19-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 14350 // CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 14351 // CHECK19-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 14352 // CHECK19-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4 14353 // CHECK19-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], 
align 4 14354 // CHECK19-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 14355 // CHECK19-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 14356 // CHECK19-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4 14357 // CHECK19-NEXT: br label [[OMP_DISPATCH_COND]] 14358 // CHECK19: omp.dispatch.end: 14359 // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 14360 // CHECK19-NEXT: ret void 14361 // 14362 // 14363 // CHECK19-LABEL: define {{[^@]+}}@_Z3bari 14364 // CHECK19-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 14365 // CHECK19-NEXT: entry: 14366 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14367 // CHECK19-NEXT: [[A:%.*]] = alloca i32, align 4 14368 // CHECK19-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 14369 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14370 // CHECK19-NEXT: store i32 0, i32* [[A]], align 4 14371 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 14372 // CHECK19-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]]) 14373 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 14374 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 14375 // CHECK19-NEXT: store i32 [[ADD]], i32* [[A]], align 4 14376 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 14377 // CHECK19-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]]) 14378 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 14379 // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 14380 // CHECK19-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 14381 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 14382 // CHECK19-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]]) 14383 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 14384 // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 14385 // CHECK19-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 14386 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 14387 // CHECK19-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]]) 14388 // CHECK19-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 14389 // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 14390 // CHECK19-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 14391 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 14392 // CHECK19-NEXT: ret i32 [[TMP8]] 14393 // 14394 // 14395 // CHECK19-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 14396 // CHECK19-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 14397 // CHECK19-NEXT: entry: 14398 // CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 14399 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14400 // CHECK19-NEXT: [[B:%.*]] = alloca i32, align 4 14401 // CHECK19-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 14402 // CHECK19-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 14403 // CHECK19-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 14404 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 14405 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 14406 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 14407 // CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 14408 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 14409 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14410 // 
CHECK19-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 14411 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 14412 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 14413 // CHECK19-NEXT: store i32 [[ADD]], i32* [[B]], align 4 14414 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 14415 // CHECK19-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 14416 // CHECK19-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 14417 // CHECK19-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] 14418 // CHECK19-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 14419 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 14420 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4 14421 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 14422 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 14423 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 14424 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60 14425 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 14426 // CHECK19: omp_if.then: 14427 // CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 14428 // CHECK19-NEXT: [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]] 14429 // CHECK19-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2 14430 // CHECK19-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64 14431 // CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 14432 // CHECK19-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1** 14433 // CHECK19-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4 14434 // CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 14435 // CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** 14436 // CHECK19-NEXT: store double* [[A]], double** [[TMP13]], align 4 14437 // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 14438 // CHECK19-NEXT: store i64 8, i64* [[TMP14]], align 4 14439 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 14440 // CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4 14441 // CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 14442 // CHECK19-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* 14443 // CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 14444 // CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 14445 // CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* 14446 // CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 14447 // CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 14448 // CHECK19-NEXT: store i64 4, i64* [[TMP20]], align 4 14449 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 14450 // CHECK19-NEXT: store i8* null, i8** [[TMP21]], align 4 14451 // CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 14452 // CHECK19-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* 14453 // CHECK19-NEXT: store i32 2, i32* [[TMP23]], align 4 14454 // CHECK19-NEXT: [[TMP24:%.*]] = 
getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 14455 // CHECK19-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* 14456 // CHECK19-NEXT: store i32 2, i32* [[TMP25]], align 4 14457 // CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 14458 // CHECK19-NEXT: store i64 4, i64* [[TMP26]], align 4 14459 // CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 14460 // CHECK19-NEXT: store i8* null, i8** [[TMP27]], align 4 14461 // CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 14462 // CHECK19-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* 14463 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 14464 // CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 14465 // CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* 14466 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 14467 // CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 14468 // CHECK19-NEXT: store i64 4, i64* [[TMP32]], align 4 14469 // CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 14470 // CHECK19-NEXT: store i8* null, i8** [[TMP33]], align 4 14471 // CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 14472 // CHECK19-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** 14473 // CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 14474 // CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 14475 // CHECK19-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** 14476 // CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 14477 // CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 14478 // CHECK19-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 14479 // CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 14480 // CHECK19-NEXT: store i8* null, i8** [[TMP39]], align 4 14481 // CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 14482 // CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 14483 // CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 14484 // CHECK19-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 14485 // CHECK19-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 14486 // CHECK19-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 14487 // CHECK19: omp_offload.failed: 14488 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] 14489 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] 14490 // CHECK19: omp_offload.cont: 14491 // 
CHECK19-NEXT: br label [[OMP_IF_END:%.*]] 14492 // CHECK19: omp_if.else: 14493 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] 14494 // CHECK19-NEXT: br label [[OMP_IF_END]] 14495 // CHECK19: omp_if.end: 14496 // CHECK19-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] 14497 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] 14498 // CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 14499 // CHECK19-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 14500 // CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 14501 // CHECK19-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 14502 // CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] 14503 // CHECK19-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 14504 // CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) 14505 // CHECK19-NEXT: ret i32 [[ADD3]] 14506 // 14507 // 14508 // CHECK19-LABEL: define {{[^@]+}}@_ZL7fstatici 14509 // CHECK19-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 14510 // CHECK19-NEXT: entry: 14511 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14512 // CHECK19-NEXT: [[A:%.*]] = alloca i32, align 4 14513 // CHECK19-NEXT: [[AA:%.*]] = alloca i16, align 2 14514 // CHECK19-NEXT: [[AAA:%.*]] = alloca i8, align 1 14515 // CHECK19-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 14516 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 14517 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 14518 // CHECK19-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 14519 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 14520 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 14521 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 14522 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14523 // CHECK19-NEXT: store i32 0, i32* [[A]], align 4 14524 // CHECK19-NEXT: store i16 0, i16* [[AA]], align 2 14525 // CHECK19-NEXT: store i8 0, i8* [[AAA]], align 1 14526 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 14527 // CHECK19-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 14528 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 14529 // CHECK19-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 14530 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 14531 // CHECK19-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 14532 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 14533 // CHECK19-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1 14534 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 14535 // CHECK19-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1 14536 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 14537 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 14538 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 14539 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 14540 // CHECK19: omp_if.then: 14541 // CHECK19-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 14542 // CHECK19-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* 14543 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 14544 // CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 
0 14545 // CHECK19-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32* 14546 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4 14547 // CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 14548 // CHECK19-NEXT: store i8* null, i8** [[TMP11]], align 4 14549 // CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 14550 // CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 14551 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 14552 // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 14553 // CHECK19-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 14554 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4 14555 // CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 14556 // CHECK19-NEXT: store i8* null, i8** [[TMP16]], align 4 14557 // CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 14558 // CHECK19-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* 14559 // CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 14560 // CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 14561 // CHECK19-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 14562 // CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4 14563 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 14564 // CHECK19-NEXT: store i8* null, i8** [[TMP21]], align 4 14565 // CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 14566 // CHECK19-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]** 14567 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4 14568 // CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 14569 // CHECK19-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]** 14570 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4 14571 // CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 14572 // CHECK19-NEXT: store i8* null, i8** [[TMP26]], align 4 14573 // CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 14574 // CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 14575 // CHECK19-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 14576 // CHECK19-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 14577 // CHECK19-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 14578 // CHECK19: omp_offload.failed: 14579 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 14580 // CHECK19-NEXT: br label 
[[OMP_OFFLOAD_CONT]] 14581 // CHECK19: omp_offload.cont: 14582 // CHECK19-NEXT: br label [[OMP_IF_END:%.*]] 14583 // CHECK19: omp_if.else: 14584 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 14585 // CHECK19-NEXT: br label [[OMP_IF_END]] 14586 // CHECK19: omp_if.end: 14587 // CHECK19-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4 14588 // CHECK19-NEXT: ret i32 [[TMP31]] 14589 // 14590 // 14591 // CHECK19-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 14592 // CHECK19-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 14593 // CHECK19-NEXT: entry: 14594 // CHECK19-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14595 // CHECK19-NEXT: [[A:%.*]] = alloca i32, align 4 14596 // CHECK19-NEXT: [[AA:%.*]] = alloca i16, align 2 14597 // CHECK19-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 14598 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 14599 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 14600 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 14601 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 14602 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 14603 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14604 // CHECK19-NEXT: store i32 0, i32* [[A]], align 4 14605 // CHECK19-NEXT: store i16 0, i16* [[AA]], align 2 14606 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 14607 // CHECK19-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 14608 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 14609 // CHECK19-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 14610 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 14611 // CHECK19-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 14612 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 14613 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 14614 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 14615 // CHECK19-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 14616 // CHECK19: omp_if.then: 14617 // CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 14618 // CHECK19-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32* 14619 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4 14620 // CHECK19-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 14621 // CHECK19-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* 14622 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 14623 // CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 14624 // CHECK19-NEXT: store i8* null, i8** [[TMP9]], align 4 14625 // CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 14626 // CHECK19-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32* 14627 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4 14628 // CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 14629 // CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 14630 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 14631 // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 14632 // CHECK19-NEXT: store i8* null, i8** 
[[TMP14]], align 4 14633 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 14634 // CHECK19-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]** 14635 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4 14636 // CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 14637 // CHECK19-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]** 14638 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4 14639 // CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 14640 // CHECK19-NEXT: store i8* null, i8** [[TMP19]], align 4 14641 // CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 14642 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 14643 // CHECK19-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 14644 // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 14645 // CHECK19-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 14646 // CHECK19: omp_offload.failed: 14647 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 14648 // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] 14649 // CHECK19: omp_offload.cont: 14650 // CHECK19-NEXT: br label [[OMP_IF_END:%.*]] 14651 // CHECK19: omp_if.else: 14652 // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 14653 // CHECK19-NEXT: br label [[OMP_IF_END]] 14654 // CHECK19: omp_if.end: 14655 // CHECK19-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4 14656 // CHECK19-NEXT: ret i32 [[TMP24]] 14657 // 14658 // 14659 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242 14660 // CHECK19-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { 14661 // CHECK19-NEXT: entry: 14662 // CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 14663 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 14664 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 14665 // CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 14666 // CHECK19-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 14667 // CHECK19-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 14668 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 14669 // CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 14670 // CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 14671 // CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 14672 // CHECK19-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 14673 // CHECK19-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 14674 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 14675 // 
CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 14676 // CHECK19-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 14677 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 14678 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 14679 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 14680 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) 14681 // CHECK19-NEXT: ret void 14682 // 14683 // 14684 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..9 14685 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { 14686 // CHECK19-NEXT: entry: 14687 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 14688 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 14689 // CHECK19-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 14690 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 14691 // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 14692 // CHECK19-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 14693 // CHECK19-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 14694 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 14695 // CHECK19-NEXT: [[TMP:%.*]] = alloca i64, align 4 14696 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 14697 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 14698 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 14699 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 14700 // CHECK19-NEXT: [[IT:%.*]] = alloca i64, align 8 14701 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 14702 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 14703 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 14704 // CHECK19-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 14705 // CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 14706 // CHECK19-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 14707 // CHECK19-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 14708 // CHECK19-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 14709 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 14710 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 14711 // CHECK19-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 14712 // CHECK19-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 14713 // CHECK19-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 14714 // CHECK19-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 14715 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 14716 // CHECK19-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 14717 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 14718 // CHECK19-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 14719 // CHECK19-NEXT: [[TMP6:%.*]] = load i64, i64* 
[[DOTOMP_UB]], align 8 14720 // CHECK19-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 14721 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 14722 // CHECK19: cond.true: 14723 // CHECK19-NEXT: br label [[COND_END:%.*]] 14724 // CHECK19: cond.false: 14725 // CHECK19-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 14726 // CHECK19-NEXT: br label [[COND_END]] 14727 // CHECK19: cond.end: 14728 // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 14729 // CHECK19-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 14730 // CHECK19-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 14731 // CHECK19-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 14732 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14733 // CHECK19: omp.inner.for.cond: 14734 // CHECK19-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 14735 // CHECK19-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 14736 // CHECK19-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 14737 // CHECK19-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14738 // CHECK19: omp.inner.for.body: 14739 // CHECK19-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 14740 // CHECK19-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 14741 // CHECK19-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 14742 // CHECK19-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 14743 // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4 14744 // CHECK19-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double 14745 // CHECK19-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 14746 // CHECK19-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 14747 // CHECK19-NEXT: store double [[ADD]], double* [[A]], align 4 14748 // CHECK19-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 14749 // CHECK19-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4 14750 // CHECK19-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 14751 // CHECK19-NEXT: store double [[INC]], double* [[A4]], align 4 14752 // CHECK19-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 14753 // CHECK19-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]] 14754 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]] 14755 // CHECK19-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 14756 // CHECK19-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2 14757 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14758 // CHECK19: omp.body.continue: 14759 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14760 // CHECK19: omp.inner.for.inc: 14761 // CHECK19-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 14762 // CHECK19-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 14763 // CHECK19-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8 14764 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] 14765 // CHECK19: omp.inner.for.end: 14766 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 14767 // CHECK19: omp.loop.exit: 14768 // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 14769 // CHECK19-NEXT: ret void 14770 // 14771 // 14772 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224 14773 // CHECK19-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) 
#[[ATTR2]] { 14774 // CHECK19-NEXT: entry: 14775 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 14776 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 14777 // CHECK19-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 14778 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 14779 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 14780 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 14781 // CHECK19-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 14782 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 14783 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 14784 // CHECK19-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 14785 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 14786 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 14787 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 14788 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 14789 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 14790 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 14791 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 14792 // CHECK19-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 14793 // CHECK19-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 14794 // CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2 14795 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 14796 // CHECK19-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4 14797 // CHECK19-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 14798 // CHECK19-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 14799 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 14800 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) 14801 // CHECK19-NEXT: ret void 14802 // 14803 // 14804 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 14805 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 14806 // CHECK19-NEXT: entry: 14807 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 14808 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 14809 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 14810 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 14811 // CHECK19-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 14812 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 14813 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 14814 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4 14815 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 14816 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 14817 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 14818 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 14819 // CHECK19-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 14820 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 14821 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 14822 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 14823 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 14824 // CHECK19-NEXT: ret void 14825 // 14826 // 14827 // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207 14828 // CHECK19-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 14829 // CHECK19-NEXT: entry: 14830 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 14831 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 14832 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 14833 // CHECK19-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 14834 // CHECK19-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 14835 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 14836 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 14837 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 14838 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 14839 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 14840 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 14841 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 14842 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 14843 // CHECK19-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 14844 // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 14845 // CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 14846 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 14847 // CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) 14848 // CHECK19-NEXT: ret void 14849 // 14850 // 14851 // CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14 14852 // CHECK19-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 14853 // CHECK19-NEXT: entry: 14854 // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 14855 // CHECK19-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 14856 // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 14857 // CHECK19-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 14858 // CHECK19-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 14859 // CHECK19-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 14860 // CHECK19-NEXT: [[TMP:%.*]] = alloca i64, align 4 14861 // CHECK19-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 14862 // CHECK19-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 14863 // CHECK19-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 14864 // CHECK19-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 14865 // CHECK19-NEXT: [[I:%.*]] = alloca i64, align 8 14866 // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 14867 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 14868 // CHECK19-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 14869 // CHECK19-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 14870 // CHECK19-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 14871 // CHECK19-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 14872 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 14873 // CHECK19-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 14874 // CHECK19-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 14875 // CHECK19-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 14876 // CHECK19-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 14877 // CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 14878 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 14879 // CHECK19-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 14880 // CHECK19-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 14881 // CHECK19-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 14882 // CHECK19-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 14883 // CHECK19: cond.true: 14884 // CHECK19-NEXT: br label [[COND_END:%.*]] 14885 // CHECK19: cond.false: 14886 // CHECK19-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 14887 // CHECK19-NEXT: br label [[COND_END]] 14888 // CHECK19: cond.end: 14889 // CHECK19-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 14890 // CHECK19-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 14891 // CHECK19-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 14892 // CHECK19-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 14893 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14894 // CHECK19: omp.inner.for.cond: 14895 // CHECK19-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 14896 // CHECK19-NEXT: [[TMP7:%.*]] = load i64, 
i64* [[DOTOMP_UB]], align 8 14897 // CHECK19-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 14898 // CHECK19-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14899 // CHECK19: omp.inner.for.body: 14900 // CHECK19-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 14901 // CHECK19-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 14902 // CHECK19-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 14903 // CHECK19-NEXT: store i64 [[ADD]], i64* [[I]], align 8 14904 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4 14905 // CHECK19-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 14906 // CHECK19-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4 14907 // CHECK19-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4 14908 // CHECK19-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32 14909 // CHECK19-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 14910 // CHECK19-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 14911 // CHECK19-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4 14912 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2 14913 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 14914 // CHECK19-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1 14915 // CHECK19-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4 14916 // CHECK19-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14917 // CHECK19: omp.body.continue: 14918 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14919 // CHECK19: omp.inner.for.inc: 14920 // CHECK19-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 14921 // CHECK19-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1 14922 // CHECK19-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8 14923 // CHECK19-NEXT: br label [[OMP_INNER_FOR_COND]] 14924 // CHECK19: omp.inner.for.end: 14925 // CHECK19-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 14926 // CHECK19: omp.loop.exit: 14927 // CHECK19-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 14928 // CHECK19-NEXT: ret void 14929 // 14930 // 14931 // CHECK19-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 14932 // CHECK19-SAME: () #[[ATTR6]] { 14933 // CHECK19-NEXT: entry: 14934 // CHECK19-NEXT: call void @__tgt_register_requires(i64 1) 14935 // CHECK19-NEXT: ret void 14936 // 14937 // 14938 // CHECK20-LABEL: define {{[^@]+}}@_Z7get_valv 14939 // CHECK20-SAME: () #[[ATTR0:[0-9]+]] { 14940 // CHECK20-NEXT: entry: 14941 // CHECK20-NEXT: ret i64 0 14942 // 14943 // 14944 // CHECK20-LABEL: define {{[^@]+}}@_Z3fooi 14945 // CHECK20-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 14946 // CHECK20-NEXT: entry: 14947 // CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 14948 // CHECK20-NEXT: [[A:%.*]] = alloca i32, align 4 14949 // CHECK20-NEXT: [[AA:%.*]] = alloca i16, align 2 14950 // CHECK20-NEXT: [[B:%.*]] = alloca [10 x float], align 4 14951 // CHECK20-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 14952 // CHECK20-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 14953 // CHECK20-NEXT: [[C:%.*]] = alloca [5 x [10 x double]], align 8 14954 // CHECK20-NEXT: [[__VLA_EXPR1:%.*]] = alloca i32, align 4 14955 // CHECK20-NEXT: [[D:%.*]] = alloca [[STRUCT_TT:%.*]], align 4 14956 // CHECK20-NEXT: [[K:%.*]] = alloca i64, align 8 14957 // CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 14958 // CHECK20-NEXT: [[LIN:%.*]] = alloca i32, align 4 14959 // CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 14960 // CHECK20-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4 14961 // CHECK20-NEXT: 
[[A_CASTED2:%.*]] = alloca i32, align 4 14962 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 14963 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 14964 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 14965 // CHECK20-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 4 14966 // CHECK20-NEXT: [[A_CASTED3:%.*]] = alloca i32, align 4 14967 // CHECK20-NEXT: [[AA_CASTED4:%.*]] = alloca i32, align 4 14968 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS6:%.*]] = alloca [2 x i8*], align 4 14969 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS7:%.*]] = alloca [2 x i8*], align 4 14970 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS8:%.*]] = alloca [2 x i8*], align 4 14971 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 14972 // CHECK20-NEXT: [[A_CASTED11:%.*]] = alloca i32, align 4 14973 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 14974 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 14975 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 14976 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 14977 // CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 14978 // CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 14979 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 14980 // CHECK20-NEXT: store i32 0, i32* [[A]], align 4 14981 // CHECK20-NEXT: store i16 0, i16* [[AA]], align 2 14982 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 14983 // CHECK20-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 14984 // CHECK20-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 14985 // CHECK20-NEXT: [[VLA:%.*]] = alloca float, i32 [[TMP1]], align 4 14986 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 14987 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_ADDR]], align 4 14988 // CHECK20-NEXT: [[TMP4:%.*]] = mul nuw i32 5, [[TMP3]] 14989 // CHECK20-NEXT: [[VLA1:%.*]] = alloca double, i32 [[TMP4]], align 8 14990 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[__VLA_EXPR1]], align 4 14991 // CHECK20-NEXT: [[TMP5:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 1, i32 0) 14992 // CHECK20-NEXT: [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0 14993 // CHECK20-NEXT: br i1 [[TMP6]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 14994 // CHECK20: omp_offload.failed: 14995 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103() #[[ATTR4:[0-9]+]] 14996 // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] 14997 // CHECK20: omp_offload.cont: 14998 // CHECK20-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() 14999 // CHECK20-NEXT: store i64 [[CALL]], i64* [[K]], align 8 15000 // CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 15001 // CHECK20-NEXT: store i32 [[TMP7]], i32* [[A_CASTED]], align 4 15002 // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_CASTED]], align 4 15003 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110(i32 [[TMP8]], i64* [[K]]) #[[ATTR4]] 15004 // CHECK20-NEXT: store i32 12, i32* [[LIN]], align 4 15005 // CHECK20-NEXT: [[TMP9:%.*]] = load i16, i16* [[AA]], align 2 15006 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 15007 // CHECK20-NEXT: store i16 [[TMP9]], i16* 
[[CONV]], align 2
// CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK20-NEXT:    store i32 [[TMP11]], i32* [[LIN_CASTED]], align 4
// CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT:    store i32 [[TMP13]], i32* [[A_CASTED2]], align 4
// CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[A_CASTED2]], align 4
// CHECK20-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32*
// CHECK20-NEXT:    store i32 [[TMP10]], i32* [[TMP16]], align 4
// CHECK20-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32*
// CHECK20-NEXT:    store i32 [[TMP10]], i32* [[TMP18]], align 4
// CHECK20-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK20-NEXT:    store i8* null, i8** [[TMP19]], align 4
// CHECK20-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32*
// CHECK20-NEXT:    store i32 [[TMP12]], i32* [[TMP21]], align 4
// CHECK20-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32*
// CHECK20-NEXT:    store i32 [[TMP12]], i32* [[TMP23]], align 4
// CHECK20-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK20-NEXT:    store i8* null, i8** [[TMP24]], align 4
// CHECK20-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK20-NEXT:    [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
// CHECK20-NEXT:    store i32 [[TMP14]], i32* [[TMP26]], align 4
// CHECK20-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK20-NEXT:    [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
// CHECK20-NEXT:    store i32 [[TMP14]], i32* [[TMP28]], align 4
// CHECK20-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK20-NEXT:    store i8* null, i8** [[TMP29]], align 4
// CHECK20-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP33:%.*]] = load i16, i16* [[AA]], align 2
// CHECK20-NEXT:    store i16 [[TMP33]], i16* [[TMP32]], align 4
// CHECK20-NEXT:    [[TMP34:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP35:%.*]] = load i32, i32* [[LIN]], align 4
// CHECK20-NEXT:    store i32 [[TMP35]], i32* [[TMP34]], align 4
// CHECK20-NEXT:    [[TMP36:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 2
// CHECK20-NEXT:    [[TMP37:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT:    store i32 [[TMP37]], i32* [[TMP36]], align 4
// CHECK20-NEXT:    [[TMP38:%.*]] = call i8* @__kmpc_omp_target_task_alloc(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i32 1, i32 72, i32 12, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*), i64 -1)
// CHECK20-NEXT:    [[TMP39:%.*]] = bitcast i8* [[TMP38]] to %struct.kmp_task_t_with_privates*
// CHECK20-NEXT:    [[TMP40:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP41:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP40]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP42:%.*]] = load i8*, i8** [[TMP41]], align 4
// CHECK20-NEXT:    [[TMP43:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
// CHECK20-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP42]], i8* align 4 [[TMP43]], i32 12, i1 false)
// CHECK20-NEXT:    [[TMP44:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP39]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP45:%.*]] = bitcast i8* [[TMP42]] to %struct.anon*
// CHECK20-NEXT:    [[TMP46:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP47:%.*]] = bitcast [3 x i64]* [[TMP46]] to i8*
// CHECK20-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP47]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes to i8*), i32 24, i1 false)
// CHECK20-NEXT:    [[TMP48:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP49:%.*]] = bitcast [3 x i8*]* [[TMP48]] to i8*
// CHECK20-NEXT:    [[TMP50:%.*]] = bitcast i8** [[TMP30]] to i8*
// CHECK20-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP49]], i8* align 4 [[TMP50]], i32 12, i1 false)
// CHECK20-NEXT:    [[TMP51:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 2
// CHECK20-NEXT:    [[TMP52:%.*]] = bitcast [3 x i8*]* [[TMP51]] to i8*
// CHECK20-NEXT:    [[TMP53:%.*]] = bitcast i8** [[TMP31]] to i8*
// CHECK20-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP52]], i8* align 4 [[TMP53]], i32 12, i1 false)
// CHECK20-NEXT:    [[TMP54:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP44]], i32 0, i32 3
// CHECK20-NEXT:    [[TMP55:%.*]] = load i16, i16* [[AA]], align 2
// CHECK20-NEXT:    store i16 [[TMP55]], i16* [[TMP54]], align 4
// CHECK20-NEXT:    [[TMP56:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]], i8* [[TMP38]])
// CHECK20-NEXT:    [[TMP57:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT:    store i32 [[TMP57]], i32* [[A_CASTED3]], align 4
// CHECK20-NEXT:    [[TMP58:%.*]] = load i32, i32* [[A_CASTED3]], align 4
// CHECK20-NEXT:    [[TMP59:%.*]] = load i16, i16* [[AA]], align 2
// CHECK20-NEXT:    [[CONV5:%.*]] = bitcast i32* [[AA_CASTED4]] to i16*
// CHECK20-NEXT:    store i16 [[TMP59]], i16* [[CONV5]], align 2
// CHECK20-NEXT:    [[TMP60:%.*]] = load i32, i32* [[AA_CASTED4]], align 4
// CHECK20-NEXT:    [[TMP61:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP61]], 10
// CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]]
// CHECK20:       omp_if.then:
// CHECK20-NEXT:    [[TMP62:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32*
// CHECK20-NEXT:    store i32 [[TMP58]], i32* [[TMP63]], align 4
// CHECK20-NEXT:    [[TMP64:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32*
// CHECK20-NEXT:    store i32 [[TMP58]], i32* [[TMP65]], align 4
// CHECK20-NEXT:    [[TMP66:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 0
// CHECK20-NEXT:    store i8* null, i8** [[TMP66]], align 4
// CHECK20-NEXT:    [[TMP67:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32*
// CHECK20-NEXT:    store i32 [[TMP60]], i32* [[TMP68]], align 4
// CHECK20-NEXT:    [[TMP69:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32*
// CHECK20-NEXT:    store i32 [[TMP60]], i32* [[TMP70]], align 4
// CHECK20-NEXT:    [[TMP71:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS8]], i32 0, i32 1
// CHECK20-NEXT:    store i8* null, i8** [[TMP71]], align 4
// CHECK20-NEXT:    [[TMP72:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS6]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP73:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS7]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP74:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146.region_id, i32 2, i8** [[TMP72]], i8** [[TMP73]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK20-NEXT:    [[TMP75:%.*]] = icmp ne i32 [[TMP74]], 0
// CHECK20-NEXT:    br i1 [[TMP75]], label [[OMP_OFFLOAD_FAILED9:%.*]], label [[OMP_OFFLOAD_CONT10:%.*]]
// CHECK20:       omp_offload.failed9:
// CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i32 [[TMP58]], i32 [[TMP60]]) #[[ATTR4]]
// CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT10]]
// CHECK20:       omp_offload.cont10:
// CHECK20-NEXT:    br label [[OMP_IF_END:%.*]]
// CHECK20:       omp_if.else:
// CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146(i32 [[TMP58]], i32 [[TMP60]]) #[[ATTR4]]
// CHECK20-NEXT:    br label [[OMP_IF_END]]
// CHECK20:       omp_if.end:
// CHECK20-NEXT:    [[TMP76:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT:    store i32 [[TMP76]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT:    [[TMP77:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT:    store i32 [[TMP77]], i32* [[A_CASTED11]], align 4
// CHECK20-NEXT:    [[TMP78:%.*]] = load i32, i32* [[A_CASTED11]], align 4
// CHECK20-NEXT:    [[TMP79:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK20-NEXT:    store i32 [[TMP79]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT:    [[TMP80:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT:    [[TMP81:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK20-NEXT:    [[CMP12:%.*]] = icmp sgt i32 [[TMP81]], 20
// CHECK20-NEXT:    br i1 [[CMP12]], label [[OMP_IF_THEN13:%.*]], label [[OMP_IF_ELSE19:%.*]]
// CHECK20:       omp_if.then13:
// CHECK20-NEXT:    [[TMP82:%.*]] = mul nuw i32 [[TMP1]], 4
// CHECK20-NEXT:    [[TMP83:%.*]] = sext i32 [[TMP82]] to i64
// CHECK20-NEXT:    [[TMP84:%.*]] = mul nuw i32 5, [[TMP3]]
// CHECK20-NEXT:    [[TMP85:%.*]] = mul nuw i32 [[TMP84]], 8
// CHECK20-NEXT:    [[TMP86:%.*]] = sext i32 [[TMP85]] to i64
// CHECK20-NEXT:    [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32*
// CHECK20-NEXT:    store i32 [[TMP78]], i32* [[TMP88]], align 4
// CHECK20-NEXT:    [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32*
// CHECK20-NEXT:    store i32 [[TMP78]], i32* [[TMP90]], align 4
// CHECK20-NEXT:    [[TMP91:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK20-NEXT:    store i64 4, i64* [[TMP91]], align 4
// CHECK20-NEXT:    [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0
// CHECK20-NEXT:    store i8* null, i8** [[TMP92]], align 4
// CHECK20-NEXT:    [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP94:%.*]] = bitcast i8** [[TMP93]] to [10 x float]**
// CHECK20-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP94]], align 4
// CHECK20-NEXT:    [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]**
// CHECK20-NEXT:    store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 4
// CHECK20-NEXT:    [[TMP97:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
// CHECK20-NEXT:    store i64 40, i64* [[TMP97]], align 4
// CHECK20-NEXT:    [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1
// CHECK20-NEXT:    store i8* null, i8** [[TMP98]], align 4
// CHECK20-NEXT:    [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2
// CHECK20-NEXT:    [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32*
// CHECK20-NEXT:    store i32 [[TMP1]], i32* [[TMP100]], align 4
// CHECK20-NEXT:    [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2
// CHECK20-NEXT:    [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32*
// CHECK20-NEXT:    store i32 [[TMP1]], i32* [[TMP102]], align 4
// CHECK20-NEXT:    [[TMP103:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK20-NEXT:    store i64 4, i64* [[TMP103]], align 4
// CHECK20-NEXT:    [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2
// CHECK20-NEXT:    store i8* null, i8** [[TMP104]], align 4
// CHECK20-NEXT:    [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3
// CHECK20-NEXT:    [[TMP106:%.*]] = bitcast i8** [[TMP105]] to float**
// CHECK20-NEXT:    store float* [[VLA]], float** [[TMP106]], align 4
// CHECK20-NEXT:    [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3
// CHECK20-NEXT:    [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float**
// CHECK20-NEXT:    store float* [[VLA]], float** [[TMP108]], align 4
// CHECK20-NEXT:    [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
// CHECK20-NEXT:    store i64 [[TMP83]], i64* [[TMP109]], align 4
// CHECK20-NEXT:    [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3
// CHECK20-NEXT:    store i8* null, i8** [[TMP110]], align 4
// CHECK20-NEXT:    [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4
// CHECK20-NEXT:    [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]**
// CHECK20-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 4
// CHECK20-NEXT:    [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4
// CHECK20-NEXT:    [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]**
// CHECK20-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 4
// CHECK20-NEXT:    [[TMP115:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
// CHECK20-NEXT:    store i64 400, i64* [[TMP115]], align 4
// CHECK20-NEXT:    [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4
// CHECK20-NEXT:    store i8* null, i8** [[TMP116]], align 4
// CHECK20-NEXT:    [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5
// CHECK20-NEXT:    [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32*
// CHECK20-NEXT:    store i32 5, i32* [[TMP118]], align 4
// CHECK20-NEXT:    [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5
// CHECK20-NEXT:    [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32*
// CHECK20-NEXT:    store i32 5, i32* [[TMP120]], align 4
// CHECK20-NEXT:    [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
// CHECK20-NEXT:    store i64 4, i64* [[TMP121]], align 4
// CHECK20-NEXT:    [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5
// CHECK20-NEXT:    store i8* null, i8** [[TMP122]], align 4
// CHECK20-NEXT:    [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6
// CHECK20-NEXT:    [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32*
// CHECK20-NEXT:    store i32 [[TMP3]], i32* [[TMP124]], align 4
// CHECK20-NEXT:    [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6
// CHECK20-NEXT:    [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32*
// CHECK20-NEXT:    store i32 [[TMP3]], i32* [[TMP126]], align 4
// CHECK20-NEXT:    [[TMP127:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
// CHECK20-NEXT:    store i64 4, i64* [[TMP127]], align 4
// CHECK20-NEXT:    [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6
// CHECK20-NEXT:    store i8* null, i8** [[TMP128]], align 4
// CHECK20-NEXT:    [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7
// CHECK20-NEXT:    [[TMP130:%.*]] = bitcast i8** [[TMP129]] to double**
// CHECK20-NEXT:    store double* [[VLA1]], double** [[TMP130]], align 4
// CHECK20-NEXT:    [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7
// CHECK20-NEXT:    [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double**
// CHECK20-NEXT:    store double* [[VLA1]], double** [[TMP132]], align 4
// CHECK20-NEXT:    [[TMP133:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
// CHECK20-NEXT:    store i64 [[TMP86]], i64* [[TMP133]], align 4
// CHECK20-NEXT:    [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7
// CHECK20-NEXT:    store i8* null, i8** [[TMP134]], align 4
// CHECK20-NEXT:    [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8
// CHECK20-NEXT:    [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT**
// CHECK20-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4
// CHECK20-NEXT:    [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8
// CHECK20-NEXT:    [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT**
// CHECK20-NEXT:    store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4
// CHECK20-NEXT:    [[TMP139:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
// CHECK20-NEXT:    store i64 12, i64* [[TMP139]], align 4
// CHECK20-NEXT:    [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8
// CHECK20-NEXT:    store i8* null, i8** [[TMP140]], align 4
// CHECK20-NEXT:    [[TMP141:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9
// CHECK20-NEXT:    [[TMP142:%.*]] = bitcast i8** [[TMP141]] to i32*
// CHECK20-NEXT:    store i32 [[TMP80]], i32* [[TMP142]], align 4
// CHECK20-NEXT:    [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9
// CHECK20-NEXT:    [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32*
// CHECK20-NEXT:    store i32 [[TMP80]], i32* [[TMP144]], align 4
// CHECK20-NEXT:    [[TMP145:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
// CHECK20-NEXT:    store i64 4, i64* [[TMP145]], align 4
// CHECK20-NEXT:    [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9
// CHECK20-NEXT:    store i8* null, i8** [[TMP146]], align 4
// CHECK20-NEXT:    [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP149:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP147]], i8** [[TMP148]], i64* [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK20-NEXT:    [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0
// CHECK20-NEXT:    br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]]
// CHECK20:       omp_offload.failed17:
// CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]]
// CHECK20-NEXT:    br label [[OMP_OFFLOAD_CONT18]]
// CHECK20:       omp_offload.cont18:
// CHECK20-NEXT:    br label [[OMP_IF_END20:%.*]]
// CHECK20:       omp_if.else19:
// CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]]
// CHECK20-NEXT:    br label [[OMP_IF_END20]]
// CHECK20:       omp_if.end20:
// CHECK20-NEXT:    [[TMP152:%.*]] = load i32, i32* [[A]], align 4
// CHECK20-NEXT:    [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
// CHECK20-NEXT:    call void @llvm.stackrestore(i8* [[TMP153]])
// CHECK20-NEXT:    ret i32 [[TMP152]]
//
//
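// The two definitions that follow cover the offload entry for the target
// region at line 103 and its outlined parallel loop. The entry takes no
// captures and simply forks @.omp_outlined., whose statically scheduled loop
// (schedule kind 34) computes i = 3 + 5*iv and exercises __kmpc_cancel and
// __kmpc_cancellationpoint with cancel kind 2 (worksharing loop); both cancel
// exits funnel into cancel.exit, which still calls __kmpc_for_static_fini
// before returning.
//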
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103
// CHECK20-SAME: () #[[ATTR2:[0-9]+]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK20-NEXT:    ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT:    store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20:       cond.true:
// CHECK20-NEXT:    br label [[COND_END:%.*]]
// CHECK20:       cond.false:
// CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    br label [[COND_END]]
// CHECK20:       cond.end:
// CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20:       omp.inner.for.cond:
// CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20:       omp.inner.for.body:
// CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK20-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
// CHECK20-NEXT:    [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK20-NEXT:    [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK20-NEXT:    br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK20:       .cancel.exit:
// CHECK20-NEXT:    br label [[CANCEL_EXIT:%.*]]
// CHECK20:       .cancel.continue:
// CHECK20-NEXT:    [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK20-NEXT:    [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK20-NEXT:    br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK20:       .cancel.exit2:
// CHECK20-NEXT:    br label [[CANCEL_EXIT]]
// CHECK20:       .cancel.continue3:
// CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20:       omp.body.continue:
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20:       omp.inner.for.inc:
// CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK20-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK20:       omp.inner.for.end:
// CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20:       omp.loop.exit:
// CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK20-NEXT:    br label [[CANCEL_CONT:%.*]]
// CHECK20:       cancel.cont:
// CHECK20-NEXT:    ret void
// CHECK20:       cancel.exit:
// CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK20-NEXT:    br label [[CANCEL_CONT]]
//
//
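// Next is the entry for the target region at line 110, which forwards the
// by-value 'a' and by-reference 'k' to @.omp_outlined..1. The outlined body
// carries the linear-clause protocol for 'k': the incoming value is cached in
// .linear.start, the loop is driven through __kmpc_dispatch_init_4 /
// __kmpc_dispatch_next_4 (schedule 1073741859, a nonmonotonic dynamic
// schedule), and the final value start + 27 (9 iterations with step 3) is
// written back through the k pointer only when the last chunk was executed.
//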
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l110
// CHECK20-SAME: (i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK20-NEXT:    [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i64*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP2]], i64* [[TMP0]])
// CHECK20-NEXT:    ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i64* nonnull align 4 dereferenceable(8) [[K:%.*]]) #[[ATTR3]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[K_ADDR:%.*]] = alloca i64*, align 4
// CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[K1:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i64* [[K]], i64** [[K_ADDR]], align 4
// CHECK20-NEXT:    [[TMP0:%.*]] = load i64*, i64** [[K_ADDR]], align 4
// CHECK20-NEXT:    [[TMP1:%.*]] = load i64, i64* [[TMP0]], align 8
// CHECK20-NEXT:    store i64 [[TMP1]], i64* [[DOTLINEAR_START]], align 8
// CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT:    store i32 8, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK20-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK20-NEXT:    call void @__kmpc_dispatch_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 1073741859, i32 0, i32 8, i32 1, i32 1)
// CHECK20-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK20:       omp.dispatch.cond:
// CHECK20-NEXT:    [[TMP4:%.*]] = call i32 @__kmpc_dispatch_next_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]])
// CHECK20-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP4]], 0
// CHECK20-NEXT:    br i1 [[TOBOOL]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK20:       omp.dispatch.body:
// CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT:    store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20:       omp.inner.for.cond:
// CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13
// CHECK20-NEXT:    [[CMP:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK20-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20:       omp.inner.for.body:
// CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 10, [[MUL]]
// CHECK20-NEXT:    store i32 [[SUB]], i32* [[I]], align 4, !llvm.access.group !13
// CHECK20-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8, !llvm.access.group !13
// CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK20-NEXT:    [[MUL2:%.*]] = mul nsw i32 [[TMP10]], 3
// CHECK20-NEXT:    [[CONV:%.*]] = sext i32 [[MUL2]] to i64
// CHECK20-NEXT:    [[ADD:%.*]] = add nsw i64 [[TMP9]], [[CONV]]
// CHECK20-NEXT:    store i64 [[ADD]], i64* [[K1]], align 8, !llvm.access.group !13
// CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[A_ADDR]], align 4, !llvm.access.group !13
// CHECK20-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK20-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4, !llvm.access.group !13
// CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20:       omp.body.continue:
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20:       omp.inner.for.inc:
// CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK20-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK20-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]]
// CHECK20:       omp.inner.for.end:
// CHECK20-NEXT:    br label [[OMP_DISPATCH_INC:%.*]]
// CHECK20:       omp.dispatch.inc:
// CHECK20-NEXT:    br label [[OMP_DISPATCH_COND]]
// CHECK20:       omp.dispatch.end:
// CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT:    [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK20-NEXT:    br i1 [[TMP14]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK20:       .omp.linear.pu:
// CHECK20-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_START]], align 8
// CHECK20-NEXT:    [[ADD5:%.*]] = add nsw i64 [[TMP15]], 27
// CHECK20-NEXT:    store i64 [[ADD5]], i64* [[TMP0]], align 8
// CHECK20-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK20:       .omp.linear.pu.done:
// CHECK20-NEXT:    ret void
//
//
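// The line 138 region passes aa, lin and a by value (each squeezed through an
// i32 "casted" slot) into @.omp_outlined..2, which checks linear lowering for
// two variables whose step is only known at run time (@_Z7get_valv): each
// iteration materializes start + iv*step into the private copies [[LIN2]] and
// [[A3]], and the .omp.linear.pu block publishes the final start + 4*step
// values after the loop.
//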
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138
// CHECK20-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT:    [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT:    store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK20-NEXT:    ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR3]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[TMP:%.*]] = alloca i64, align 4
// CHECK20-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[IT:%.*]] = alloca i64, align 8
// CHECK20-NEXT:    [[LIN2:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[A3:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK20-NEXT:    [[CALL:%.*]] = call i64 @_Z7get_valv()
// CHECK20-NEXT:    store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK20-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK20-NEXT:    store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK20-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]])
// CHECK20-NEXT:    call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK20-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20:       cond.true:
// CHECK20-NEXT:    br label [[COND_END:%.*]]
// CHECK20:       cond.false:
// CHECK20-NEXT:    [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    br label [[COND_END]]
// CHECK20:       cond.end:
// CHECK20-NEXT:    [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK20-NEXT:    store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK20-NEXT:    store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20:       omp.inner.for.cond:
// CHECK20-NEXT:    [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK20-NEXT:    [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK20-NEXT:    [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK20-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20:       omp.inner.for.body:
// CHECK20-NEXT:    [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK20-NEXT:    [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK20-NEXT:    [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK20-NEXT:    store i64 [[SUB]], i64* [[IT]], align 8
// CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK20-NEXT:    [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK20-NEXT:    [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK20-NEXT:    [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK20-NEXT:    [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK20-NEXT:    [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK20-NEXT:    [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK20-NEXT:    store i32 [[CONV7]], i32* [[LIN2]], align 4
// CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK20-NEXT:    [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK20-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK20-NEXT:    [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK20-NEXT:    [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK20-NEXT:    [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK20-NEXT:    [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK20-NEXT:    store i32 [[CONV11]], i32* [[A3]], align 4
// CHECK20-NEXT:    [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT:    [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK20-NEXT:    [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK20-NEXT:    [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK20-NEXT:    store i16 [[CONV14]], i16* [[CONV]], align 4
// CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20:       omp.body.continue:
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20:       omp.inner.for.inc:
// CHECK20-NEXT:    [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK20-NEXT:    [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK20-NEXT:    store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK20:       omp.inner.for.end:
// CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20:       omp.loop.exit:
// CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT:    [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK20-NEXT:    br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK20:       .omp.linear.pu:
// CHECK20-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK20-NEXT:    [[CONV16:%.*]] = sext i32 [[TMP20]] to i64
// CHECK20-NEXT:    [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK20-NEXT:    [[MUL17:%.*]] = mul i64 4, [[TMP21]]
// CHECK20-NEXT:    [[ADD18:%.*]] = add i64 [[CONV16]], [[MUL17]]
// CHECK20-NEXT:    [[CONV19:%.*]] = trunc i64 [[ADD18]] to i32
// CHECK20-NEXT:    store i32 [[CONV19]], i32* [[LIN_ADDR]], align 4
// CHECK20-NEXT:    [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK20-NEXT:    [[CONV20:%.*]] = sext i32 [[TMP22]] to i64
// CHECK20-NEXT:    [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK20-NEXT:    [[MUL21:%.*]] = mul i64 4, [[TMP23]]
// CHECK20-NEXT:    [[ADD22:%.*]] = add i64 [[CONV20]], [[MUL21]]
// CHECK20-NEXT:    [[CONV23:%.*]] = trunc i64 [[ADD22]] to i32
// CHECK20-NEXT:    store i32 [[CONV23]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK20:       .omp.linear.pu.done:
// CHECK20-NEXT:    ret void
//
//
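// @.omp_task_privates_map. is the privates mapping thunk stored into the task
// created for the nowait variant of the line 138 region: given the
// %struct..kmp_privates.t payload, it hands back pointers to the four
// firstprivate copies (offload sizes, base pointers, pointers, and the i16
// aa) that were memcpy'd into the task at allocation time.
//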
// CHECK20-LABEL: define {{[^@]+}}@.omp_task_privates_map.
// CHECK20-SAME: (%struct..kmp_privates.t* noalias [[TMP0:%.*]], i16** noalias [[TMP1:%.*]], [3 x i8*]** noalias [[TMP2:%.*]], [3 x i8*]** noalias [[TMP3:%.*]], [3 x i64]** noalias [[TMP4:%.*]]) #[[ATTR6:[0-9]+]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 4
// CHECK20-NEXT:    [[DOTADDR1:%.*]] = alloca i16**, align 4
// CHECK20-NEXT:    [[DOTADDR2:%.*]] = alloca [3 x i8*]**, align 4
// CHECK20-NEXT:    [[DOTADDR3:%.*]] = alloca [3 x i8*]**, align 4
// CHECK20-NEXT:    [[DOTADDR4:%.*]] = alloca [3 x i64]**, align 4
// CHECK20-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 4
// CHECK20-NEXT:    store i16** [[TMP1]], i16*** [[DOTADDR1]], align 4
// CHECK20-NEXT:    store [3 x i8*]** [[TMP2]], [3 x i8*]*** [[DOTADDR2]], align 4
// CHECK20-NEXT:    store [3 x i8*]** [[TMP3]], [3 x i8*]*** [[DOTADDR3]], align 4
// CHECK20-NEXT:    store [3 x i64]** [[TMP4]], [3 x i64]*** [[DOTADDR4]], align 4
// CHECK20-NEXT:    [[TMP5:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 4
// CHECK20-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP7:%.*]] = load [3 x i64]**, [3 x i64]*** [[DOTADDR4]], align 4
// CHECK20-NEXT:    store [3 x i64]* [[TMP6]], [3 x i64]** [[TMP7]], align 4
// CHECK20-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP9:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR2]], align 4
// CHECK20-NEXT:    store [3 x i8*]* [[TMP8]], [3 x i8*]** [[TMP9]], align 4
// CHECK20-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 2
// CHECK20-NEXT:    [[TMP11:%.*]] = load [3 x i8*]**, [3 x i8*]*** [[DOTADDR3]], align 4
// CHECK20-NEXT:    store [3 x i8*]* [[TMP10]], [3 x i8*]** [[TMP11]], align 4
// CHECK20-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP5]], i32 0, i32 3
// CHECK20-NEXT:    [[TMP13:%.*]] = load i16**, i16*** [[DOTADDR1]], align 4
// CHECK20-NEXT:    store i16* [[TMP12]], i16** [[TMP13]], align 4
// CHECK20-NEXT:    ret void
//
//
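// @.omp_task_entry. is the proxy task routine behind that nowait target
// region: it unpacks the capture struct and the privates (via the map
// function above), launches the region with __tgt_target_teams_nowait_mapper,
// and on failure falls back to the host entry with values reloaded from the
// task's captures.
//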
// CHECK20-LABEL: define {{[^@]+}}@.omp_task_entry.
// CHECK20-SAME: (i32 [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK20-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 4
// CHECK20-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 4
// CHECK20-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 4
// CHECK20-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i16*, align 4
// CHECK20-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [3 x i8*]*, align 4
// CHECK20-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [3 x i8*]*, align 4
// CHECK20-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [3 x i64]*, align 4
// CHECK20-NEXT:    [[AA_CASTED_I:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[LIN_CASTED_I:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[A_CASTED_I:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 4
// CHECK20-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
// CHECK20-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
// CHECK20-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 4
// CHECK20-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
// CHECK20-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK20-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
// CHECK20-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
// CHECK20-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
// CHECK20-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META16:![0-9]+]])
// CHECK20-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META19:![0-9]+]])
// CHECK20-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
// CHECK20-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META23:![0-9]+]])
// CHECK20-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !25
// CHECK20-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 4, !noalias !25
// CHECK20-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25
// CHECK20-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25
// CHECK20-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 4, !noalias !25
// CHECK20-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i16**, [3 x i8*]**, [3 x i8*]**, [3 x i64]**)*
// CHECK20-NEXT:    call void [[TMP15]](i8* [[TMP14]], i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
// CHECK20-NEXT:    [[TMP16:%.*]] = load i16*, i16** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP17:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP18:%.*]] = load [3 x i8*]*, [3 x i8*]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP19:%.*]] = load [3 x i64]*, [3 x i64]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP17]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[TMP18]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[TMP19]], i32 0, i32 0
// CHECK20-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP12]], i32 0, i32 1
// CHECK20-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP12]], i32 0, i32 2
// CHECK20-NEXT:    [[TMP25:%.*]] = call i32 @__tgt_target_teams_nowait_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0, i32 0, i8* null, i32 0, i8* null) #[[ATTR4]]
// CHECK20-NEXT:    [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0
// CHECK20-NEXT:    br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]]
// CHECK20:       omp_offload.failed.i:
// CHECK20-NEXT:    [[TMP27:%.*]] = load i16, i16* [[TMP16]], align 2
// CHECK20-NEXT:    [[CONV_I:%.*]] = bitcast i32* [[AA_CASTED_I]] to i16*
// CHECK20-NEXT:    store i16 [[TMP27]], i16* [[CONV_I]], align 2, !noalias !25
// CHECK20-NEXT:    [[TMP28:%.*]] = load i32, i32* [[AA_CASTED_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP29:%.*]] = load i32, i32* [[TMP23]], align 4
// CHECK20-NEXT:    store i32 [[TMP29]], i32* [[LIN_CASTED_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP30:%.*]] = load i32, i32* [[LIN_CASTED_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP31:%.*]] = load i32, i32* [[TMP24]], align 4
// CHECK20-NEXT:    store i32 [[TMP31]], i32* [[A_CASTED_I]], align 4, !noalias !25
// CHECK20-NEXT:    [[TMP32:%.*]] = load i32, i32* [[A_CASTED_I]], align 4, !noalias !25
// CHECK20-NEXT:    call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138(i32 [[TMP28]], i32 [[TMP30]], i32 [[TMP32]]) #[[ATTR4]]
// CHECK20-NEXT:    br label [[DOTOMP_OUTLINED__3_EXIT]]
// CHECK20:       .omp_outlined..3.exit:
// CHECK20-NEXT:    ret i32 0
//
//
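// The line 146 entry re-creates the i16 'aa' from its i32 transport slot and
// forks @.omp_outlined..4, whose induction variable is the 16-bit 'it'
// computed as 6 + 4*iv over four statically scheduled iterations while the
// body bumps both 'a' and 'aa'. A plausible source shape for this region
// (an illustrative reconstruction, not quoted from the test input) is:
//
//   #pragma omp target parallel for if(target: n > 10)
//   for (unsigned short it = 6; it < 21; it += 4) {
//     a += 1;
//     aa += 1;
//   }
//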
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146
// CHECK20-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT:    [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT:    [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK20-NEXT:    store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK20-NEXT:    ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR3]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[TMP:%.*]] = alloca i16, align 2
// CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[IT:%.*]] = alloca i16, align 2
// CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK20-NEXT:    [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT:    store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK20-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20:       cond.true:
// CHECK20-NEXT:    br label [[COND_END:%.*]]
// CHECK20:       cond.false:
// CHECK20-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    br label [[COND_END]]
// CHECK20:       cond.end:
// CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT:    store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20:       omp.inner.for.cond:
// CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK20-NEXT:    br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20:       omp.inner.for.body:
// CHECK20-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK20-NEXT:    [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK20-NEXT:    store i16 [[CONV2]], i16* [[IT]], align 2
// CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK20-NEXT:    store i32 [[ADD3]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK20-NEXT:    [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK20-NEXT:    [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK20-NEXT:    [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK20-NEXT:    store i16 [[CONV6]], i16* [[CONV]], align 4
// CHECK20-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK20:       omp.body.continue:
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK20:       omp.inner.for.inc:
// CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK20-NEXT:    store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK20:       omp.inner.for.end:
// CHECK20-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK20:       omp.loop.exit:
// CHECK20-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK20-NEXT:    ret void
//
//
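// The final pair covers the line 170 region: the entry threads ten captures
// (including two VLA extents and the chunk size captured from 'a' in
// [[DOTCAPTURE_EXPR_]]) into @.omp_outlined..7, which is checked for chunked
// static scheduling (__kmpc_for_static_init_4 with schedule kind 33 and the
// runtime chunk), an i8 'it' counting down from 122, and a dispatch loop that
// re-evaluates the bounds between chunks.
//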
// CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170
// CHECK20-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK20-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK20-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK20-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK20-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK20-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK20-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK20-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK20-NEXT:    [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT:    store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK20-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
// CHECK20-NEXT:    ret void
//
//
// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] {
// CHECK20-NEXT:  entry:
// CHECK20-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK20-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK20-NEXT:    [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK20-NEXT:    [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK20-NEXT:    [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK20-NEXT:    [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK20-NEXT:    [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[TMP:%.*]] = alloca i8, align 1
// CHECK20-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK20-NEXT:    [[IT:%.*]] = alloca i8, align 1
// CHECK20-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK20-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT:    store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK20-NEXT:    store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT:    store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT:    store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK20-NEXT:    store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT:    store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT:    [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK20-NEXT:    [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK20-NEXT:    [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK20-NEXT:    [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK20-NEXT:    [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK20-NEXT:    [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK20-NEXT:    [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK20-NEXT:    [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT:    store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK20-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK20-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK20-NEXT:    [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK20-NEXT:    [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK20-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK20-NEXT:    br label [[OMP_DISPATCH_COND:%.*]]
// CHECK20:       omp.dispatch.cond:
// CHECK20-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK20-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK20:       cond.true:
// CHECK20-NEXT:    br label [[COND_END:%.*]]
// CHECK20:       cond.false:
// CHECK20-NEXT:    [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    br label [[COND_END]]
// CHECK20:       cond.end:
// CHECK20-NEXT:    [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK20-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK20-NEXT:    store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK20-NEXT:    br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK20:       omp.dispatch.body:
// CHECK20-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK20:       omp.inner.for.cond:
// CHECK20-NEXT:    [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK20-NEXT:    [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK20-NEXT:    br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK20:       omp.inner.for.body:
// CHECK20-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK20-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK20-NEXT:    [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK20-NEXT:    [[CONV:%.*]] = trunc i32 [[SUB]] to i8
// CHECK20-NEXT:    store i8 [[CONV]], i8* [[IT]], align 1
// CHECK20-NEXT:    [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK20-NEXT:    store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK20-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
//
CHECK20-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4 15912 // CHECK20-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double 15913 // CHECK20-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 15914 // CHECK20-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float 15915 // CHECK20-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4 15916 // CHECK20-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 15917 // CHECK20-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4 15918 // CHECK20-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double 15919 // CHECK20-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 15920 // CHECK20-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float 15921 // CHECK20-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4 15922 // CHECK20-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 15923 // CHECK20-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 15924 // CHECK20-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8 15925 // CHECK20-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 15926 // CHECK20-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8 15927 // CHECK20-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] 15928 // CHECK20-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] 15929 // CHECK20-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 15930 // CHECK20-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8 15931 // CHECK20-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 15932 // CHECK20-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8 15933 // CHECK20-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 15934 // CHECK20-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4 15935 // CHECK20-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 15936 // CHECK20-NEXT: store i64 [[ADD20]], i64* [[X]], align 4 15937 // CHECK20-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 15938 // CHECK20-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4 15939 // CHECK20-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 15940 // CHECK20-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 15941 // CHECK20-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 15942 // CHECK20-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4 15943 // CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 15944 // CHECK20: omp.body.continue: 15945 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 15946 // CHECK20: omp.inner.for.inc: 15947 // CHECK20-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 15948 // CHECK20-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 15949 // CHECK20-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4 15950 // CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]] 15951 // CHECK20: omp.inner.for.end: 15952 // CHECK20-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 15953 // CHECK20: omp.dispatch.inc: 15954 // CHECK20-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 15955 // CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 15956 // CHECK20-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 15957 // CHECK20-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4 15958 // CHECK20-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], 
align 4 15959 // CHECK20-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 15960 // CHECK20-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 15961 // CHECK20-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4 15962 // CHECK20-NEXT: br label [[OMP_DISPATCH_COND]] 15963 // CHECK20: omp.dispatch.end: 15964 // CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 15965 // CHECK20-NEXT: ret void 15966 // 15967 // 15968 // CHECK20-LABEL: define {{[^@]+}}@_Z3bari 15969 // CHECK20-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 15970 // CHECK20-NEXT: entry: 15971 // CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 15972 // CHECK20-NEXT: [[A:%.*]] = alloca i32, align 4 15973 // CHECK20-NEXT: [[S:%.*]] = alloca [[STRUCT_S1:%.*]], align 4 15974 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 15975 // CHECK20-NEXT: store i32 0, i32* [[A]], align 4 15976 // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 15977 // CHECK20-NEXT: [[CALL:%.*]] = call i32 @_Z3fooi(i32 [[TMP0]]) 15978 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A]], align 4 15979 // CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[CALL]] 15980 // CHECK20-NEXT: store i32 [[ADD]], i32* [[A]], align 4 15981 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 15982 // CHECK20-NEXT: [[CALL1:%.*]] = call i32 @_ZN2S12r1Ei(%struct.S1* nonnull align 4 dereferenceable(8) [[S]], i32 [[TMP2]]) 15983 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[A]], align 4 15984 // CHECK20-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP3]], [[CALL1]] 15985 // CHECK20-NEXT: store i32 [[ADD2]], i32* [[A]], align 4 15986 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 15987 // CHECK20-NEXT: [[CALL3:%.*]] = call i32 @_ZL7fstatici(i32 [[TMP4]]) 15988 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[A]], align 4 15989 // CHECK20-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP5]], [[CALL3]] 15990 // CHECK20-NEXT: store i32 [[ADD4]], i32* [[A]], align 4 15991 // CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 15992 // CHECK20-NEXT: [[CALL5:%.*]] = call i32 @_Z9ftemplateIiET_i(i32 [[TMP6]]) 15993 // CHECK20-NEXT: [[TMP7:%.*]] = load i32, i32* [[A]], align 4 15994 // CHECK20-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP7]], [[CALL5]] 15995 // CHECK20-NEXT: store i32 [[ADD6]], i32* [[A]], align 4 15996 // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[A]], align 4 15997 // CHECK20-NEXT: ret i32 [[TMP8]] 15998 // 15999 // 16000 // CHECK20-LABEL: define {{[^@]+}}@_ZN2S12r1Ei 16001 // CHECK20-SAME: (%struct.S1* nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 [[N:%.*]]) #[[ATTR0]] comdat align 2 { 16002 // CHECK20-NEXT: entry: 16003 // CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 16004 // CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 16005 // CHECK20-NEXT: [[B:%.*]] = alloca i32, align 4 16006 // CHECK20-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 16007 // CHECK20-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 16008 // CHECK20-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 16009 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 16010 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 16011 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 16012 // CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 16013 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 16014 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 16015 // 
CHECK20-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 16016 // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4 16017 // CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], 1 16018 // CHECK20-NEXT: store i32 [[ADD]], i32* [[B]], align 4 16019 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4 16020 // CHECK20-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 16021 // CHECK20-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 4 16022 // CHECK20-NEXT: [[TMP3:%.*]] = mul nuw i32 2, [[TMP1]] 16023 // CHECK20-NEXT: [[VLA:%.*]] = alloca i16, i32 [[TMP3]], align 2 16024 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[__VLA_EXPR0]], align 4 16025 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[B]], align 4 16026 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 16027 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 16028 // CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 16029 // CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 60 16030 // CHECK20-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 16031 // CHECK20: omp_if.then: 16032 // CHECK20-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[THIS1]], i32 0, i32 0 16033 // CHECK20-NEXT: [[TMP7:%.*]] = mul nuw i32 2, [[TMP1]] 16034 // CHECK20-NEXT: [[TMP8:%.*]] = mul nuw i32 [[TMP7]], 2 16035 // CHECK20-NEXT: [[TMP9:%.*]] = sext i32 [[TMP8]] to i64 16036 // CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 16037 // CHECK20-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to %struct.S1** 16038 // CHECK20-NEXT: store %struct.S1* [[THIS1]], %struct.S1** [[TMP11]], align 4 16039 // CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 16040 // CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** 16041 // CHECK20-NEXT: store double* [[A]], double** [[TMP13]], align 4 16042 // CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 16043 // CHECK20-NEXT: store i64 8, i64* [[TMP14]], align 4 16044 // CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 16045 // CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4 16046 // CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 16047 // CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* 16048 // CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 16049 // CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 16050 // CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* 16051 // CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 16052 // CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 16053 // CHECK20-NEXT: store i64 4, i64* [[TMP20]], align 4 16054 // CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 16055 // CHECK20-NEXT: store i8* null, i8** [[TMP21]], align 4 16056 // CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 16057 // CHECK20-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* 16058 // CHECK20-NEXT: store i32 2, i32* [[TMP23]], align 4 16059 // CHECK20-NEXT: [[TMP24:%.*]] = 
getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 16060 // CHECK20-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* 16061 // CHECK20-NEXT: store i32 2, i32* [[TMP25]], align 4 16062 // CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 16063 // CHECK20-NEXT: store i64 4, i64* [[TMP26]], align 4 16064 // CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 16065 // CHECK20-NEXT: store i8* null, i8** [[TMP27]], align 4 16066 // CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 16067 // CHECK20-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* 16068 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 16069 // CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 16070 // CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* 16071 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 16072 // CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 16073 // CHECK20-NEXT: store i64 4, i64* [[TMP32]], align 4 16074 // CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 16075 // CHECK20-NEXT: store i8* null, i8** [[TMP33]], align 4 16076 // CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 16077 // CHECK20-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** 16078 // CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 16079 // CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 16080 // CHECK20-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** 16081 // CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 16082 // CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 16083 // CHECK20-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 16084 // CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 16085 // CHECK20-NEXT: store i8* null, i8** [[TMP39]], align 4 16086 // CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 16087 // CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 16088 // CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 16089 // CHECK20-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 16090 // CHECK20-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 16091 // CHECK20-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 16092 // CHECK20: omp_offload.failed: 16093 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] 16094 // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] 16095 // CHECK20: omp_offload.cont: 16096 // 
CHECK20-NEXT: br label [[OMP_IF_END:%.*]] 16097 // CHECK20: omp_if.else: 16098 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] 16099 // CHECK20-NEXT: br label [[OMP_IF_END]] 16100 // CHECK20: omp_if.end: 16101 // CHECK20-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] 16102 // CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] 16103 // CHECK20-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 16104 // CHECK20-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 16105 // CHECK20-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 16106 // CHECK20-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 16107 // CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] 16108 // CHECK20-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 16109 // CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) 16110 // CHECK20-NEXT: ret i32 [[ADD3]] 16111 // 16112 // 16113 // CHECK20-LABEL: define {{[^@]+}}@_ZL7fstatici 16114 // CHECK20-SAME: (i32 [[N:%.*]]) #[[ATTR0]] { 16115 // CHECK20-NEXT: entry: 16116 // CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 16117 // CHECK20-NEXT: [[A:%.*]] = alloca i32, align 4 16118 // CHECK20-NEXT: [[AA:%.*]] = alloca i16, align 2 16119 // CHECK20-NEXT: [[AAA:%.*]] = alloca i8, align 1 16120 // CHECK20-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 16121 // CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 16122 // CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 16123 // CHECK20-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 16124 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 16125 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 16126 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 16127 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 16128 // CHECK20-NEXT: store i32 0, i32* [[A]], align 4 16129 // CHECK20-NEXT: store i16 0, i16* [[AA]], align 2 16130 // CHECK20-NEXT: store i8 0, i8* [[AAA]], align 1 16131 // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 16132 // CHECK20-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 16133 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 16134 // CHECK20-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 16135 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 16136 // CHECK20-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 16137 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 16138 // CHECK20-NEXT: [[TMP4:%.*]] = load i8, i8* [[AAA]], align 1 16139 // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 16140 // CHECK20-NEXT: store i8 [[TMP4]], i8* [[CONV1]], align 1 16141 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 16142 // CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[N_ADDR]], align 4 16143 // CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 50 16144 // CHECK20-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 16145 // CHECK20: omp_if.then: 16146 // CHECK20-NEXT: [[TMP7:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 16147 // CHECK20-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* 16148 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 16149 // CHECK20-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 
0 16150 // CHECK20-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32* 16151 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP10]], align 4 16152 // CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 16153 // CHECK20-NEXT: store i8* null, i8** [[TMP11]], align 4 16154 // CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 16155 // CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 16156 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 16157 // CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 16158 // CHECK20-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 16159 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP15]], align 4 16160 // CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 16161 // CHECK20-NEXT: store i8* null, i8** [[TMP16]], align 4 16162 // CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 16163 // CHECK20-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* 16164 // CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 16165 // CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 16166 // CHECK20-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* 16167 // CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP20]], align 4 16168 // CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 16169 // CHECK20-NEXT: store i8* null, i8** [[TMP21]], align 4 16170 // CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 16171 // CHECK20-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to [10 x i32]** 16172 // CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP23]], align 4 16173 // CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 16174 // CHECK20-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to [10 x i32]** 16175 // CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP25]], align 4 16176 // CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 16177 // CHECK20-NEXT: store i8* null, i8** [[TMP26]], align 4 16178 // CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 16179 // CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 16180 // CHECK20-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 16181 // CHECK20-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 16182 // CHECK20-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 16183 // CHECK20: omp_offload.failed: 16184 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 16185 // CHECK20-NEXT: br label 
[[OMP_OFFLOAD_CONT]] 16186 // CHECK20: omp_offload.cont: 16187 // CHECK20-NEXT: br label [[OMP_IF_END:%.*]] 16188 // CHECK20: omp_if.else: 16189 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224(i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]], [10 x i32]* [[B]]) #[[ATTR4]] 16190 // CHECK20-NEXT: br label [[OMP_IF_END]] 16191 // CHECK20: omp_if.end: 16192 // CHECK20-NEXT: [[TMP31:%.*]] = load i32, i32* [[A]], align 4 16193 // CHECK20-NEXT: ret i32 [[TMP31]] 16194 // 16195 // 16196 // CHECK20-LABEL: define {{[^@]+}}@_Z9ftemplateIiET_i 16197 // CHECK20-SAME: (i32 [[N:%.*]]) #[[ATTR0]] comdat { 16198 // CHECK20-NEXT: entry: 16199 // CHECK20-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 16200 // CHECK20-NEXT: [[A:%.*]] = alloca i32, align 4 16201 // CHECK20-NEXT: [[AA:%.*]] = alloca i16, align 2 16202 // CHECK20-NEXT: [[B:%.*]] = alloca [10 x i32], align 4 16203 // CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 16204 // CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 16205 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 16206 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 16207 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 16208 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 16209 // CHECK20-NEXT: store i32 0, i32* [[A]], align 4 16210 // CHECK20-NEXT: store i16 0, i16* [[AA]], align 2 16211 // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 4 16212 // CHECK20-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 16213 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 16214 // CHECK20-NEXT: [[TMP2:%.*]] = load i16, i16* [[AA]], align 2 16215 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 16216 // CHECK20-NEXT: store i16 [[TMP2]], i16* [[CONV]], align 2 16217 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 16218 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[N_ADDR]], align 4 16219 // CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 40 16220 // CHECK20-NEXT: br i1 [[CMP]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 16221 // CHECK20: omp_if.then: 16222 // CHECK20-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 16223 // CHECK20-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32* 16224 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP6]], align 4 16225 // CHECK20-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 16226 // CHECK20-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32* 16227 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP8]], align 4 16228 // CHECK20-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 16229 // CHECK20-NEXT: store i8* null, i8** [[TMP9]], align 4 16230 // CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 16231 // CHECK20-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32* 16232 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4 16233 // CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 16234 // CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 16235 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP13]], align 4 16236 // CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 16237 // CHECK20-NEXT: store i8* null, i8** 
[[TMP14]], align 4 16238 // CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 16239 // CHECK20-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to [10 x i32]** 16240 // CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP16]], align 4 16241 // CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 16242 // CHECK20-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to [10 x i32]** 16243 // CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[TMP18]], align 4 16244 // CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 16245 // CHECK20-NEXT: store i8* null, i8** [[TMP19]], align 4 16246 // CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 16247 // CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 16248 // CHECK20-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 16249 // CHECK20-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 16250 // CHECK20-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 16251 // CHECK20: omp_offload.failed: 16252 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 16253 // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] 16254 // CHECK20: omp_offload.cont: 16255 // CHECK20-NEXT: br label [[OMP_IF_END:%.*]] 16256 // CHECK20: omp_if.else: 16257 // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207(i32 [[TMP1]], i32 [[TMP3]], [10 x i32]* [[B]]) #[[ATTR4]] 16258 // CHECK20-NEXT: br label [[OMP_IF_END]] 16259 // CHECK20: omp_if.end: 16260 // CHECK20-NEXT: [[TMP24:%.*]] = load i32, i32* [[A]], align 4 16261 // CHECK20-NEXT: ret i32 [[TMP24]] 16262 // 16263 // 16264 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242 16265 // CHECK20-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { 16266 // CHECK20-NEXT: entry: 16267 // CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 16268 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 16269 // CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 16270 // CHECK20-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 16271 // CHECK20-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 16272 // CHECK20-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 16273 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 16274 // CHECK20-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 16275 // CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 16276 // CHECK20-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 16277 // CHECK20-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 16278 // CHECK20-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 16279 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 16280 // 
CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 16281 // CHECK20-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 16282 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 16283 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 16284 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 16285 // CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) 16286 // CHECK20-NEXT: ret void 16287 // 16288 // 16289 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..9 16290 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { 16291 // CHECK20-NEXT: entry: 16292 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 16293 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 16294 // CHECK20-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 16295 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 16296 // CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 16297 // CHECK20-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 16298 // CHECK20-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 16299 // CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 16300 // CHECK20-NEXT: [[TMP:%.*]] = alloca i64, align 4 16301 // CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 16302 // CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 16303 // CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 16304 // CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 16305 // CHECK20-NEXT: [[IT:%.*]] = alloca i64, align 8 16306 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 16307 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 16308 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 16309 // CHECK20-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 16310 // CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 16311 // CHECK20-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 16312 // CHECK20-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 16313 // CHECK20-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 16314 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 16315 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 16316 // CHECK20-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 16317 // CHECK20-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 16318 // CHECK20-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 16319 // CHECK20-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 16320 // CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 16321 // CHECK20-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 16322 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 16323 // CHECK20-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 16324 // CHECK20-NEXT: [[TMP6:%.*]] = load i64, i64* 
[[DOTOMP_UB]], align 8 16325 // CHECK20-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 16326 // CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 16327 // CHECK20: cond.true: 16328 // CHECK20-NEXT: br label [[COND_END:%.*]] 16329 // CHECK20: cond.false: 16330 // CHECK20-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 16331 // CHECK20-NEXT: br label [[COND_END]] 16332 // CHECK20: cond.end: 16333 // CHECK20-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 16334 // CHECK20-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 16335 // CHECK20-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 16336 // CHECK20-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 16337 // CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 16338 // CHECK20: omp.inner.for.cond: 16339 // CHECK20-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16340 // CHECK20-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 16341 // CHECK20-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 16342 // CHECK20-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 16343 // CHECK20: omp.inner.for.body: 16344 // CHECK20-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16345 // CHECK20-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 16346 // CHECK20-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 16347 // CHECK20-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 16348 // CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4 16349 // CHECK20-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double 16350 // CHECK20-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 16351 // CHECK20-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 16352 // CHECK20-NEXT: store double [[ADD]], double* [[A]], align 4 16353 // CHECK20-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 16354 // CHECK20-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4 16355 // CHECK20-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 16356 // CHECK20-NEXT: store double [[INC]], double* [[A4]], align 4 16357 // CHECK20-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 16358 // CHECK20-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]] 16359 // CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]] 16360 // CHECK20-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 16361 // CHECK20-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2 16362 // CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 16363 // CHECK20: omp.body.continue: 16364 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 16365 // CHECK20: omp.inner.for.inc: 16366 // CHECK20-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16367 // CHECK20-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 16368 // CHECK20-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8 16369 // CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]] 16370 // CHECK20: omp.inner.for.end: 16371 // CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 16372 // CHECK20: omp.loop.exit: 16373 // CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 16374 // CHECK20-NEXT: ret void 16375 // 16376 // 16377 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224 16378 // CHECK20-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) 
#[[ATTR2]] { 16379 // CHECK20-NEXT: entry: 16380 // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 16381 // CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 16382 // CHECK20-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 16383 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 16384 // CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 16385 // CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 16386 // CHECK20-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 16387 // CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 16388 // CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 16389 // CHECK20-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 16390 // CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 16391 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 16392 // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 16393 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 16394 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 16395 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 16396 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 16397 // CHECK20-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 16398 // CHECK20-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 16399 // CHECK20-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2 16400 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 16401 // CHECK20-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4 16402 // CHECK20-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 16403 // CHECK20-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 16404 // CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 16405 // CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) 16406 // CHECK20-NEXT: ret void 16407 // 16408 // 16409 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 16410 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 16411 // CHECK20-NEXT: entry: 16412 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 16413 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 16414 // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 16415 // CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 16416 // CHECK20-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 16417 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 16418 // CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 16419 // CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4 16420 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 16421 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 16422 // CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 16423 // CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 16424 // CHECK20-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 16425 // CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 16426 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 16427 // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 16428 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 16429 // CHECK20-NEXT: ret void 16430 // 16431 // 16432 // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207 16433 // CHECK20-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { 16434 // CHECK20-NEXT: entry: 16435 // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 16436 // CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 16437 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 16438 // CHECK20-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 16439 // CHECK20-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 16440 // CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 16441 // CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 16442 // CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 16443 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 16444 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 16445 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 16446 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 16447 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 16448 // CHECK20-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 16449 // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 16450 // CHECK20-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 16451 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 16452 // CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) 16453 // CHECK20-NEXT: ret void 16454 // 16455 // 16456 // CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14 16457 // CHECK20-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { 16458 // CHECK20-NEXT: entry: 16459 // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 16460 // CHECK20-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 16461 // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 16462 // CHECK20-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 16463 // CHECK20-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 16464 // CHECK20-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 16465 // CHECK20-NEXT: [[TMP:%.*]] = alloca i64, align 4 16466 // CHECK20-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 16467 // CHECK20-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 16468 // CHECK20-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 16469 // CHECK20-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 16470 // CHECK20-NEXT: [[I:%.*]] = alloca i64, align 8 16471 // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 16472 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 16473 // CHECK20-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 16474 // CHECK20-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 16475 // CHECK20-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 16476 // CHECK20-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 16477 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 16478 // CHECK20-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 16479 // CHECK20-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 16480 // CHECK20-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 16481 // CHECK20-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 16482 // CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 16483 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 16484 // CHECK20-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 16485 // CHECK20-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 16486 // CHECK20-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 16487 // CHECK20-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 16488 // CHECK20: cond.true: 16489 // CHECK20-NEXT: br label [[COND_END:%.*]] 16490 // CHECK20: cond.false: 16491 // CHECK20-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 16492 // CHECK20-NEXT: br label [[COND_END]] 16493 // CHECK20: cond.end: 16494 // CHECK20-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 16495 // CHECK20-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 16496 // CHECK20-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 16497 // CHECK20-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 16498 // CHECK20-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 16499 // CHECK20: omp.inner.for.cond: 16500 // CHECK20-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16501 // CHECK20-NEXT: [[TMP7:%.*]] = load i64, 
i64* [[DOTOMP_UB]], align 8 16502 // CHECK20-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 16503 // CHECK20-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 16504 // CHECK20: omp.inner.for.body: 16505 // CHECK20-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16506 // CHECK20-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 16507 // CHECK20-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 16508 // CHECK20-NEXT: store i64 [[ADD]], i64* [[I]], align 8 16509 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4 16510 // CHECK20-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 16511 // CHECK20-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4 16512 // CHECK20-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4 16513 // CHECK20-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32 16514 // CHECK20-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 16515 // CHECK20-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 16516 // CHECK20-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4 16517 // CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2 16518 // CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 16519 // CHECK20-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1 16520 // CHECK20-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4 16521 // CHECK20-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 16522 // CHECK20: omp.body.continue: 16523 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 16524 // CHECK20: omp.inner.for.inc: 16525 // CHECK20-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16526 // CHECK20-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1 16527 // CHECK20-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8 16528 // CHECK20-NEXT: br label [[OMP_INNER_FOR_COND]] 16529 // CHECK20: omp.inner.for.end: 16530 // CHECK20-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 16531 // CHECK20: omp.loop.exit: 16532 // CHECK20-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 16533 // CHECK20-NEXT: ret void 16534 // 16535 // 16536 // CHECK20-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 16537 // CHECK20-SAME: () #[[ATTR6]] { 16538 // CHECK20-NEXT: entry: 16539 // CHECK20-NEXT: call void @__tgt_register_requires(i64 1) 16540 // CHECK20-NEXT: ret void 16541 // 16542 // 16543 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 16544 // CHECK25-SAME: () #[[ATTR0:[0-9]+]] { 16545 // CHECK25-NEXT: entry: 16546 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 16547 // CHECK25-NEXT: ret void 16548 // 16549 // 16550 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined. 
16551 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] { 16552 // CHECK25-NEXT: entry: 16553 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 16554 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 16555 // CHECK25-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 16556 // CHECK25-NEXT: [[TMP:%.*]] = alloca i32, align 4 16557 // CHECK25-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 16558 // CHECK25-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 16559 // CHECK25-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 16560 // CHECK25-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 16561 // CHECK25-NEXT: [[I:%.*]] = alloca i32, align 4 16562 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 16563 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 16564 // CHECK25-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 16565 // CHECK25-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4 16566 // CHECK25-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 16567 // CHECK25-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 16568 // CHECK25-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 16569 // CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 16570 // CHECK25-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 16571 // CHECK25-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 16572 // CHECK25-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5 16573 // CHECK25-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 16574 // CHECK25: cond.true: 16575 // CHECK25-NEXT: br label [[COND_END:%.*]] 16576 // CHECK25: cond.false: 16577 // CHECK25-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 16578 // CHECK25-NEXT: br label [[COND_END]] 16579 // CHECK25: cond.end: 16580 // CHECK25-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 16581 // CHECK25-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 16582 // CHECK25-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 16583 // CHECK25-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 16584 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 16585 // CHECK25: omp.inner.for.cond: 16586 // CHECK25-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 16587 // CHECK25-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 16588 // CHECK25-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 16589 // CHECK25-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 16590 // CHECK25: omp.inner.for.body: 16591 // CHECK25-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 16592 // CHECK25-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5 16593 // CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]] 16594 // CHECK25-NEXT: store i32 [[ADD]], i32* [[I]], align 4 16595 // CHECK25-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2) 16596 // CHECK25-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 16597 // CHECK25-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]] 16598 // CHECK25: .cancel.exit: 16599 // CHECK25-NEXT: br label [[CANCEL_EXIT:%.*]] 16600 // CHECK25: .cancel.continue: 16601 // CHECK25-NEXT: [[TMP10:%.*]] = call i32 
@__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2) 16602 // CHECK25-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0 16603 // CHECK25-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]] 16604 // CHECK25: .cancel.exit2: 16605 // CHECK25-NEXT: br label [[CANCEL_EXIT]] 16606 // CHECK25: .cancel.continue3: 16607 // CHECK25-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 16608 // CHECK25: omp.body.continue: 16609 // CHECK25-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 16610 // CHECK25: omp.inner.for.inc: 16611 // CHECK25-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 16612 // CHECK25-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1 16613 // CHECK25-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4 16614 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND]] 16615 // CHECK25: omp.inner.for.end: 16616 // CHECK25-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 16617 // CHECK25: omp.loop.exit: 16618 // CHECK25-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 16619 // CHECK25-NEXT: br label [[CANCEL_CONT:%.*]] 16620 // CHECK25: cancel.cont: 16621 // CHECK25-NEXT: ret void 16622 // CHECK25: cancel.exit: 16623 // CHECK25-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 16624 // CHECK25-NEXT: br label [[CANCEL_CONT]] 16625 // 16626 // 16627 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138 16628 // CHECK25-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR0]] { 16629 // CHECK25-NEXT: entry: 16630 // CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 16631 // CHECK25-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 16632 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 16633 // CHECK25-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 16634 // CHECK25-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8 16635 // CHECK25-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 16636 // CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 16637 // CHECK25-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 16638 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 16639 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 16640 // CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 16641 // CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 16642 // CHECK25-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8 16643 // CHECK25-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 16644 // CHECK25-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2 16645 // CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8 16646 // CHECK25-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8 16647 // CHECK25-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32* 16648 // CHECK25-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4 16649 // CHECK25-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8 16650 // CHECK25-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8 16651 // CHECK25-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32* 16652 // CHECK25-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4 16653 // CHECK25-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8 16654 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]]) 16655 // CHECK25-NEXT: ret void 16656 // 16657 // 16658 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..1 16659 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR1]] { 16660 // CHECK25-NEXT: entry: 16661 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 16662 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 16663 // CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 16664 // CHECK25-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8 16665 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 16666 // CHECK25-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 16667 // CHECK25-NEXT: [[TMP:%.*]] = alloca i64, align 8 16668 // CHECK25-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4 16669 // CHECK25-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4 16670 // CHECK25-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8 16671 // CHECK25-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 16672 // CHECK25-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 16673 // CHECK25-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 16674 // CHECK25-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 16675 // CHECK25-NEXT: [[IT:%.*]] = alloca i64, align 8 16676 // CHECK25-NEXT: [[LIN4:%.*]] = alloca i32, align 4 16677 // CHECK25-NEXT: [[A5:%.*]] = alloca i32, align 4 16678 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 16679 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 16680 // CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 16681 // CHECK25-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8 16682 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 16683 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 16684 // CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32* 16685 // CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32* 16686 // CHECK25-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8 16687 // CHECK25-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4 16688 // CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8 16689 // CHECK25-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4 16690 // CHECK25-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]] 16691 // CHECK25-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8 16692 // CHECK25-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 16693 // CHECK25-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 16694 // CHECK25-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 16695 // CHECK25-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 16696 // CHECK25-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 16697 // CHECK25-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 16698 // CHECK25-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]]) 16699 // CHECK25-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 16700 // CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 16701 // CHECK25-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3 16702 // CHECK25-NEXT: br i1 
[[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 16703 // CHECK25: cond.true: 16704 // CHECK25-NEXT: br label [[COND_END:%.*]] 16705 // CHECK25: cond.false: 16706 // CHECK25-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 16707 // CHECK25-NEXT: br label [[COND_END]] 16708 // CHECK25: cond.end: 16709 // CHECK25-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 16710 // CHECK25-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 16711 // CHECK25-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 16712 // CHECK25-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 16713 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 16714 // CHECK25: omp.inner.for.cond: 16715 // CHECK25-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16716 // CHECK25-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 16717 // CHECK25-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 16718 // CHECK25-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 16719 // CHECK25: omp.inner.for.body: 16720 // CHECK25-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16721 // CHECK25-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 16722 // CHECK25-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 16723 // CHECK25-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 16724 // CHECK25-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 16725 // CHECK25-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64 16726 // CHECK25-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16727 // CHECK25-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 16728 // CHECK25-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]] 16729 // CHECK25-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]] 16730 // CHECK25-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32 16731 // CHECK25-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4 16732 // CHECK25-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4 16733 // CHECK25-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64 16734 // CHECK25-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16735 // CHECK25-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 16736 // CHECK25-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]] 16737 // CHECK25-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]] 16738 // CHECK25-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32 16739 // CHECK25-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4 16740 // CHECK25-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8 16741 // CHECK25-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32 16742 // CHECK25-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1 16743 // CHECK25-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16 16744 // CHECK25-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8 16745 // CHECK25-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 16746 // CHECK25: omp.body.continue: 16747 // CHECK25-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 16748 // CHECK25: omp.inner.for.inc: 16749 // CHECK25-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 16750 // CHECK25-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1 16751 // CHECK25-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8 16752 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND]] 16753 // CHECK25: omp.inner.for.end: 16754 // CHECK25-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 16755 // CHECK25: omp.loop.exit: 16756 // CHECK25-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 16757 // CHECK25-NEXT: [[TMP18:%.*]] = 
load i32, i32* [[DOTOMP_IS_LAST]], align 4 16758 // CHECK25-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 16759 // CHECK25-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 16760 // CHECK25: .omp.linear.pu: 16761 // CHECK25-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 16762 // CHECK25-NEXT: [[CONV18:%.*]] = sext i32 [[TMP20]] to i64 16763 // CHECK25-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 16764 // CHECK25-NEXT: [[MUL19:%.*]] = mul i64 4, [[TMP21]] 16765 // CHECK25-NEXT: [[ADD20:%.*]] = add i64 [[CONV18]], [[MUL19]] 16766 // CHECK25-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD20]] to i32 16767 // CHECK25-NEXT: store i32 [[CONV21]], i32* [[CONV1]], align 8 16768 // CHECK25-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4 16769 // CHECK25-NEXT: [[CONV22:%.*]] = sext i32 [[TMP22]] to i64 16770 // CHECK25-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 16771 // CHECK25-NEXT: [[MUL23:%.*]] = mul i64 4, [[TMP23]] 16772 // CHECK25-NEXT: [[ADD24:%.*]] = add i64 [[CONV22]], [[MUL23]] 16773 // CHECK25-NEXT: [[CONV25:%.*]] = trunc i64 [[ADD24]] to i32 16774 // CHECK25-NEXT: store i32 [[CONV25]], i32* [[CONV2]], align 8 16775 // CHECK25-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 16776 // CHECK25: .omp.linear.pu.done: 16777 // CHECK25-NEXT: ret void 16778 // 16779 // 16780 // CHECK25-LABEL: define {{[^@]+}}@_Z7get_valv 16781 // CHECK25-SAME: () #[[ATTR3:[0-9]+]] { 16782 // CHECK25-NEXT: entry: 16783 // CHECK25-NEXT: ret i64 0 16784 // 16785 // 16786 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146 16787 // CHECK25-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] { 16788 // CHECK25-NEXT: entry: 16789 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 16790 // CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 16791 // CHECK25-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 16792 // CHECK25-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 16793 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 16794 // CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 16795 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 16796 // CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 16797 // CHECK25-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8 16798 // CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 16799 // CHECK25-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4 16800 // CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8 16801 // CHECK25-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8 16802 // CHECK25-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 16803 // CHECK25-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2 16804 // CHECK25-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8 16805 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]]) 16806 // CHECK25-NEXT: ret void 16807 // 16808 // 16809 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..2 16810 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR1]] { 16811 // CHECK25-NEXT: entry: 16812 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 16813 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 16814 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 16815 // CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 16816 // CHECK25-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 16817 // CHECK25-NEXT: [[TMP:%.*]] = alloca i16, align 2 16818 // CHECK25-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 16819 // CHECK25-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 16820 // CHECK25-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 16821 // CHECK25-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 16822 // CHECK25-NEXT: [[IT:%.*]] = alloca i16, align 2 16823 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 16824 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 16825 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 16826 // CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 16827 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 16828 // CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 16829 // CHECK25-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 16830 // CHECK25-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 16831 // CHECK25-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 16832 // CHECK25-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 16833 // CHECK25-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 16834 // CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 16835 // CHECK25-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 16836 // CHECK25-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 16837 // CHECK25-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 16838 // CHECK25-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 16839 // CHECK25: cond.true: 16840 // CHECK25-NEXT: br label [[COND_END:%.*]] 16841 // CHECK25: cond.false: 16842 // CHECK25-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 16843 // CHECK25-NEXT: br label [[COND_END]] 16844 // CHECK25: cond.end: 16845 // CHECK25-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 16846 // CHECK25-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 16847 // CHECK25-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 16848 // CHECK25-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 16849 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 16850 // CHECK25: omp.inner.for.cond: 16851 // CHECK25-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 16852 // CHECK25-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 16853 // CHECK25-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 16854 // CHECK25-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 16855 // CHECK25: omp.inner.for.body: 16856 // CHECK25-NEXT: 
[[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 16857 // CHECK25-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 16858 // CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 16859 // CHECK25-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16 16860 // CHECK25-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2 16861 // CHECK25-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8 16862 // CHECK25-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1 16863 // CHECK25-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8 16864 // CHECK25-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8 16865 // CHECK25-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32 16866 // CHECK25-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1 16867 // CHECK25-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16 16868 // CHECK25-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8 16869 // CHECK25-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 16870 // CHECK25: omp.body.continue: 16871 // CHECK25-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 16872 // CHECK25: omp.inner.for.inc: 16873 // CHECK25-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 16874 // CHECK25-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1 16875 // CHECK25-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4 16876 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND]] 16877 // CHECK25: omp.inner.for.end: 16878 // CHECK25-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 16879 // CHECK25: omp.loop.exit: 16880 // CHECK25-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 16881 // CHECK25-NEXT: ret void 16882 // 16883 // 16884 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170 16885 // CHECK25-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 16886 // CHECK25-NEXT: entry: 16887 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 16888 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 16889 // CHECK25-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 16890 // CHECK25-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 16891 // CHECK25-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 16892 // CHECK25-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 16893 // CHECK25-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 16894 // CHECK25-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 16895 // CHECK25-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 16896 // CHECK25-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 16897 // CHECK25-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 16898 // CHECK25-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 16899 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 16900 // CHECK25-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 16901 // CHECK25-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 16902 // CHECK25-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 16903 // CHECK25-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 16904 // CHECK25-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 16905 // CHECK25-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 16906 // CHECK25-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 16907 // 
CHECK25-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 16908 // CHECK25-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 16909 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 16910 // CHECK25-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 16911 // CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 16912 // CHECK25-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 16913 // CHECK25-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 16914 // CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 16915 // CHECK25-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 16916 // CHECK25-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 16917 // CHECK25-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 16918 // CHECK25-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 16919 // CHECK25-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8 16920 // CHECK25-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32* 16921 // CHECK25-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4 16922 // CHECK25-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8 16923 // CHECK25-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8 16924 // CHECK25-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 16925 // CHECK25-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4 16926 // CHECK25-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 16927 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]]) 16928 // CHECK25-NEXT: ret void 16929 // 16930 // 16931 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..3 16932 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 16933 // CHECK25-NEXT: entry: 16934 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 16935 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 16936 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 16937 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8 16938 // CHECK25-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 16939 // CHECK25-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8 16940 // CHECK25-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8 16941 // CHECK25-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 16942 // CHECK25-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8 16943 // CHECK25-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8 16944 // CHECK25-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8 16945 // CHECK25-NEXT: 
[[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 16946 // CHECK25-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 16947 // CHECK25-NEXT: [[TMP:%.*]] = alloca i8, align 1 16948 // CHECK25-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 16949 // CHECK25-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 16950 // CHECK25-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 16951 // CHECK25-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 16952 // CHECK25-NEXT: [[IT:%.*]] = alloca i8, align 1 16953 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 16954 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 16955 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 16956 // CHECK25-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8 16957 // CHECK25-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 16958 // CHECK25-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8 16959 // CHECK25-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8 16960 // CHECK25-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 16961 // CHECK25-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8 16962 // CHECK25-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8 16963 // CHECK25-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8 16964 // CHECK25-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 16965 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 16966 // CHECK25-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8 16967 // CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 16968 // CHECK25-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8 16969 // CHECK25-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8 16970 // CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 16971 // CHECK25-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8 16972 // CHECK25-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8 16973 // CHECK25-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8 16974 // CHECK25-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 16975 // CHECK25-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 16976 // CHECK25-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 16977 // CHECK25-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 16978 // CHECK25-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 16979 // CHECK25-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8 16980 // CHECK25-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 16981 // CHECK25-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 16982 // CHECK25-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 16983 // CHECK25-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 16984 // CHECK25: omp.dispatch.cond: 16985 // CHECK25-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 16986 // CHECK25-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 16987 // CHECK25-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 16988 // CHECK25: cond.true: 16989 // CHECK25-NEXT: br label [[COND_END:%.*]] 16990 // CHECK25: cond.false: 16991 // CHECK25-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 16992 // CHECK25-NEXT: br label 
[[COND_END]] 16993 // CHECK25: cond.end: 16994 // CHECK25-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 16995 // CHECK25-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 16996 // CHECK25-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 16997 // CHECK25-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 16998 // CHECK25-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 16999 // CHECK25-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 17000 // CHECK25-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 17001 // CHECK25-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 17002 // CHECK25: omp.dispatch.body: 17003 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 17004 // CHECK25: omp.inner.for.cond: 17005 // CHECK25-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 17006 // CHECK25-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 17007 // CHECK25-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 17008 // CHECK25-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 17009 // CHECK25: omp.inner.for.body: 17010 // CHECK25-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 17011 // CHECK25-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 17012 // CHECK25-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 17013 // CHECK25-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8 17014 // CHECK25-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1 17015 // CHECK25-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8 17016 // CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 17017 // CHECK25-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8 17018 // CHECK25-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2 17019 // CHECK25-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4 17020 // CHECK25-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double 17021 // CHECK25-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00 17022 // CHECK25-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float 17023 // CHECK25-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4 17024 // CHECK25-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3 17025 // CHECK25-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4 17026 // CHECK25-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double 17027 // CHECK25-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00 17028 // CHECK25-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float 17029 // CHECK25-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4 17030 // CHECK25-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1 17031 // CHECK25-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2 17032 // CHECK25-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8 17033 // CHECK25-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00 17034 // CHECK25-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8 17035 // CHECK25-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]] 17036 // CHECK25-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]] 17037 // CHECK25-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3 17038 // CHECK25-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8 17039 // 
CHECK25-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00 17040 // CHECK25-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8 17041 // CHECK25-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 17042 // CHECK25-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8 17043 // CHECK25-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1 17044 // CHECK25-NEXT: store i64 [[ADD22]], i64* [[X]], align 8 17045 // CHECK25-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 17046 // CHECK25-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8 17047 // CHECK25-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32 17048 // CHECK25-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1 17049 // CHECK25-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8 17050 // CHECK25-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8 17051 // CHECK25-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 17052 // CHECK25: omp.body.continue: 17053 // CHECK25-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 17054 // CHECK25: omp.inner.for.inc: 17055 // CHECK25-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 17056 // CHECK25-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1 17057 // CHECK25-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4 17058 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND]] 17059 // CHECK25: omp.inner.for.end: 17060 // CHECK25-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 17061 // CHECK25: omp.dispatch.inc: 17062 // CHECK25-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 17063 // CHECK25-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 17064 // CHECK25-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 17065 // CHECK25-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4 17066 // CHECK25-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 17067 // CHECK25-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 17068 // CHECK25-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 17069 // CHECK25-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4 17070 // CHECK25-NEXT: br label [[OMP_DISPATCH_COND]] 17071 // CHECK25: omp.dispatch.end: 17072 // CHECK25-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 17073 // CHECK25-NEXT: ret void 17074 // 17075 // 17076 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224 17077 // CHECK25-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 17078 // CHECK25-NEXT: entry: 17079 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 17080 // CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 17081 // CHECK25-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 17082 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 17083 // CHECK25-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 17084 // CHECK25-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 17085 // CHECK25-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8 17086 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 17087 // CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 17088 // CHECK25-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 17089 // CHECK25-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 17090 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 17091 // CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 17092 // CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 17093 // CHECK25-NEXT: 
[[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 17094 // CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 17095 // CHECK25-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32* 17096 // CHECK25-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4 17097 // CHECK25-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 17098 // CHECK25-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 17099 // CHECK25-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 17100 // CHECK25-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2 17101 // CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 17102 // CHECK25-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8 17103 // CHECK25-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* 17104 // CHECK25-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 17105 // CHECK25-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 17106 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) 17107 // CHECK25-NEXT: ret void 17108 // 17109 // 17110 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..4 17111 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 17112 // CHECK25-NEXT: entry: 17113 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 17114 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 17115 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 17116 // CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 17117 // CHECK25-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8 17118 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 17119 // CHECK25-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 17120 // CHECK25-NEXT: [[TMP:%.*]] = alloca i32, align 4 17121 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 17122 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 17123 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 17124 // CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 17125 // CHECK25-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8 17126 // CHECK25-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 17127 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 17128 // CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 17129 // CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8* 17130 // CHECK25-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 17131 // CHECK25-NEXT: ret void 17132 // 17133 // 17134 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242 17135 // CHECK25-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] { 17136 // CHECK25-NEXT: entry: 17137 // CHECK25-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 17138 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 17139 // CHECK25-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 17140 // CHECK25-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 17141 // CHECK25-NEXT: [[C_ADDR:%.*]] = alloca i16*, 
align 8 17142 // CHECK25-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8 17143 // CHECK25-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 17144 // CHECK25-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 17145 // CHECK25-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 17146 // CHECK25-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 17147 // CHECK25-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 17148 // CHECK25-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 17149 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 17150 // CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 17151 // CHECK25-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8 17152 // CHECK25-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 17153 // CHECK25-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8 17154 // CHECK25-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* 17155 // CHECK25-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 17156 // CHECK25-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 17157 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) 17158 // CHECK25-NEXT: ret void 17159 // 17160 // 17161 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..5 17162 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] { 17163 // CHECK25-NEXT: entry: 17164 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 17165 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 17166 // CHECK25-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8 17167 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8 17168 // CHECK25-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 17169 // CHECK25-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8 17170 // CHECK25-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8 17171 // CHECK25-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 17172 // CHECK25-NEXT: [[TMP:%.*]] = alloca i64, align 8 17173 // CHECK25-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 17174 // CHECK25-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 17175 // CHECK25-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 17176 // CHECK25-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 17177 // CHECK25-NEXT: [[IT:%.*]] = alloca i64, align 8 17178 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 17179 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 17180 // CHECK25-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 17181 // CHECK25-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8 17182 // CHECK25-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 17183 // CHECK25-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8 17184 // CHECK25-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8 17185 // CHECK25-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 17186 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32* 17187 // CHECK25-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 17188 // CHECK25-NEXT: [[TMP2:%.*]] = load 
i64, i64* [[VLA_ADDR2]], align 8 17189 // CHECK25-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8 17190 // CHECK25-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 17191 // CHECK25-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 17192 // CHECK25-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 17193 // CHECK25-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 17194 // CHECK25-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 17195 // CHECK25-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 17196 // CHECK25-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 17197 // CHECK25-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 17198 // CHECK25-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 17199 // CHECK25-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 17200 // CHECK25: cond.true: 17201 // CHECK25-NEXT: br label [[COND_END:%.*]] 17202 // CHECK25: cond.false: 17203 // CHECK25-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 17204 // CHECK25-NEXT: br label [[COND_END]] 17205 // CHECK25: cond.end: 17206 // CHECK25-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 17207 // CHECK25-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 17208 // CHECK25-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 17209 // CHECK25-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 17210 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 17211 // CHECK25: omp.inner.for.cond: 17212 // CHECK25-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 17213 // CHECK25-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 17214 // CHECK25-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 17215 // CHECK25-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 17216 // CHECK25: omp.inner.for.body: 17217 // CHECK25-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 17218 // CHECK25-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 17219 // CHECK25-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 17220 // CHECK25-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 17221 // CHECK25-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8 17222 // CHECK25-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double 17223 // CHECK25-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00 17224 // CHECK25-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 17225 // CHECK25-NEXT: store double [[ADD]], double* [[A]], align 8 17226 // CHECK25-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 17227 // CHECK25-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8 17228 // CHECK25-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 17229 // CHECK25-NEXT: store double [[INC]], double* [[A5]], align 8 17230 // CHECK25-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16 17231 // CHECK25-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]] 17232 // CHECK25-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]] 17233 // CHECK25-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 17234 // CHECK25-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2 17235 // CHECK25-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 17236 // CHECK25: omp.body.continue: 17237 // CHECK25-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 17238 // 
CHECK25: omp.inner.for.inc: 17239 // CHECK25-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 17240 // CHECK25-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1 17241 // CHECK25-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8 17242 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND]] 17243 // CHECK25: omp.inner.for.end: 17244 // CHECK25-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 17245 // CHECK25: omp.loop.exit: 17246 // CHECK25-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 17247 // CHECK25-NEXT: ret void 17248 // 17249 // 17250 // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207 17251 // CHECK25-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 17252 // CHECK25-NEXT: entry: 17253 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 17254 // CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 17255 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 17256 // CHECK25-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8 17257 // CHECK25-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8 17258 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 17259 // CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 17260 // CHECK25-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 17261 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 17262 // CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 17263 // CHECK25-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 17264 // CHECK25-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8 17265 // CHECK25-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32* 17266 // CHECK25-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4 17267 // CHECK25-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8 17268 // CHECK25-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8 17269 // CHECK25-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* 17270 // CHECK25-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 17271 // CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 17272 // CHECK25-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) 17273 // CHECK25-NEXT: ret void 17274 // 17275 // 17276 // CHECK25-LABEL: define {{[^@]+}}@.omp_outlined..6 17277 // CHECK25-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 17278 // CHECK25-NEXT: entry: 17279 // CHECK25-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 17280 // CHECK25-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 17281 // CHECK25-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8 17282 // CHECK25-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8 17283 // CHECK25-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 17284 // CHECK25-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 17285 // CHECK25-NEXT: [[TMP:%.*]] = alloca i64, align 8 17286 // CHECK25-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 17287 // CHECK25-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 17288 // CHECK25-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 17289 // CHECK25-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 17290 // CHECK25-NEXT: [[I:%.*]] = alloca i64, align 8 17291 // CHECK25-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 17292 // CHECK25-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 17293 // CHECK25-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8 17294 // CHECK25-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8 17295 // CHECK25-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 17296 // CHECK25-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32* 17297 // CHECK25-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16* 17298 // CHECK25-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 17299 // CHECK25-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 17300 // CHECK25-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 17301 // CHECK25-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 17302 // CHECK25-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 17303 // CHECK25-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 17304 // CHECK25-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 17305 // CHECK25-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 17306 // CHECK25-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 17307 // CHECK25-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 17308 // CHECK25-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 17309 // CHECK25: cond.true: 17310 // CHECK25-NEXT: br label [[COND_END:%.*]] 17311 // CHECK25: cond.false: 17312 // CHECK25-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 17313 // CHECK25-NEXT: br label [[COND_END]] 17314 // CHECK25: cond.end: 17315 // CHECK25-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 17316 // CHECK25-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 17317 // CHECK25-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 17318 // CHECK25-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 17319 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 17320 // CHECK25: omp.inner.for.cond: 17321 // CHECK25-NEXT: [[TMP6:%.*]] = load i64, i64* 
[[DOTOMP_IV]], align 8 17322 // CHECK25-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 17323 // CHECK25-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 17324 // CHECK25-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 17325 // CHECK25: omp.inner.for.body: 17326 // CHECK25-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 17327 // CHECK25-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 17328 // CHECK25-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 17329 // CHECK25-NEXT: store i64 [[ADD]], i64* [[I]], align 8 17330 // CHECK25-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8 17331 // CHECK25-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1 17332 // CHECK25-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8 17333 // CHECK25-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8 17334 // CHECK25-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32 17335 // CHECK25-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 17336 // CHECK25-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 17337 // CHECK25-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8 17338 // CHECK25-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2 17339 // CHECK25-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 17340 // CHECK25-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1 17341 // CHECK25-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4 17342 // CHECK25-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 17343 // CHECK25: omp.body.continue: 17344 // CHECK25-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 17345 // CHECK25: omp.inner.for.inc: 17346 // CHECK25-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 17347 // CHECK25-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1 17348 // CHECK25-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8 17349 // CHECK25-NEXT: br label [[OMP_INNER_FOR_COND]] 17350 // CHECK25: omp.inner.for.end: 17351 // CHECK25-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 17352 // CHECK25: omp.loop.exit: 17353 // CHECK25-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 17354 // CHECK25-NEXT: ret void 17355 // 17356 // 17357 // CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 17358 // CHECK26-SAME: () #[[ATTR0:[0-9]+]] { 17359 // CHECK26-NEXT: entry: 17360 // CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 17361 // CHECK26-NEXT: ret void 17362 // 17363 // 17364 // CHECK26-LABEL: define {{[^@]+}}@.omp_outlined. 
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK26-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK26-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK26-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK26-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK26-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK26: cond.true:
// CHECK26-NEXT: br label [[COND_END:%.*]]
// CHECK26: cond.false:
// CHECK26-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: br label [[COND_END]]
// CHECK26: cond.end:
// CHECK26-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK26-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK26-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK26: omp.inner.for.cond:
// CHECK26-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK26-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK26: omp.inner.for.body:
// CHECK26-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK26-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK26-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK26-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK26-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK26: .cancel.exit:
// CHECK26-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK26: .cancel.continue:
// CHECK26-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK26-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK26-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK26: .cancel.exit2:
// CHECK26-NEXT: br label [[CANCEL_EXIT]]
// CHECK26: .cancel.continue3:
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK26: omp.body.continue:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK26: omp.inner.for.inc:
// CHECK26-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK26-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK26: omp.inner.for.end:
// CHECK26-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK26: omp.loop.exit:
// CHECK26-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK26-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK26: cancel.cont:
// CHECK26-NEXT: ret void
// CHECK26: cancel.exit:
// CHECK26-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK26-NEXT: br label [[CANCEL_CONT]]
//
//
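// The .omp_outlined. checks above verify a cancellable static worksharing
// loop: __kmpc_for_static_init_4/__kmpc_for_static_fini bracket the loop, and
// both __kmpc_cancel and __kmpc_cancellationpoint branch to a shared
// cancel.exit block that still reaches the fini call before returning. A
// minimal sketch of a construct with this shape -- an illustrative
// reconstruction from the checks (i = 3 + 5 * iv with upper bound 5 gives the
// six iterations 3, 8, ..., 28), not necessarily this test's exact source:
//
//   #pragma omp target parallel for
//   for (int i = 3; i < 32; i += 5) {
//   #pragma omp cancel for
//   #pragma omp cancellation point for
//   }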
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138
// CHECK26-SAME: (i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[LIN_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK26-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK26-NEXT: store i16 [[TMP0]], i16* [[CONV3]], align 2
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV4:%.*]] = bitcast i64* [[LIN_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP2]], i32* [[CONV4]], align 4
// CHECK26-NEXT: [[TMP3:%.*]] = load i64, i64* [[LIN_CASTED]], align 8
// CHECK26-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK26-NEXT: [[CONV5:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP4]], i32* [[CONV5]], align 4
// CHECK26-NEXT: [[TMP5:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]], i64 [[TMP5]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[AA:%.*]], i64 [[LIN:%.*]], i64 [[A:%.*]]) #[[ATTR1]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[LIN_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTLINEAR_START3:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[LIN4:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[A5:%.*]] = alloca i32, align 4
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: store i64 [[LIN]], i64* [[LIN_ADDR]], align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[LIN_ADDR]] to i32*
// CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV1]], align 8
// CHECK26-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV2]], align 8
// CHECK26-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START3]], align 4
// CHECK26-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK26-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK26-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK26-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK26-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK26-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK26-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK26-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK26: cond.true:
// CHECK26-NEXT: br label [[COND_END:%.*]]
// CHECK26: cond.false:
// CHECK26-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: br label [[COND_END]]
// CHECK26: cond.end:
// CHECK26-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK26-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK26-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK26: omp.inner.for.cond:
// CHECK26-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: [[CMP6:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK26-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK26: omp.inner.for.body:
// CHECK26-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK26-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK26-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK26-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK26-NEXT: [[CONV7:%.*]] = sext i32 [[TMP10]] to i64
// CHECK26-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK26-NEXT: [[MUL8:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK26-NEXT: [[ADD:%.*]] = add i64 [[CONV7]], [[MUL8]]
// CHECK26-NEXT: [[CONV9:%.*]] = trunc i64 [[ADD]] to i32
// CHECK26-NEXT: store i32 [[CONV9]], i32* [[LIN4]], align 4
// CHECK26-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4
// CHECK26-NEXT: [[CONV10:%.*]] = sext i32 [[TMP13]] to i64
// CHECK26-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK26-NEXT: [[MUL11:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK26-NEXT: [[ADD12:%.*]] = add i64 [[CONV10]], [[MUL11]]
// CHECK26-NEXT: [[CONV13:%.*]] = trunc i64 [[ADD12]] to i32
// CHECK26-NEXT: store i32 [[CONV13]], i32* [[A5]], align 4
// CHECK26-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 8
// CHECK26-NEXT: [[CONV14:%.*]] = sext i16 [[TMP16]] to i32
// CHECK26-NEXT: [[ADD15:%.*]] = add nsw i32 [[CONV14]], 1
// CHECK26-NEXT: [[CONV16:%.*]] = trunc i32 [[ADD15]] to i16
// CHECK26-NEXT: store i16 [[CONV16]], i16* [[CONV]], align 8
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK26: omp.body.continue:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK26: omp.inner.for.inc:
// CHECK26-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: [[ADD17:%.*]] = add i64 [[TMP17]], 1
// CHECK26-NEXT: store i64 [[ADD17]], i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK26: omp.inner.for.end:
// CHECK26-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK26: omp.loop.exit:
// CHECK26-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK26-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK26-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK26-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK26: .omp.linear.pu:
// CHECK26-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK26-NEXT: [[CONV18:%.*]] = sext i32 [[TMP20]] to i64
// CHECK26-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK26-NEXT: [[MUL19:%.*]] = mul i64 4, [[TMP21]]
// CHECK26-NEXT: [[ADD20:%.*]] = add i64 [[CONV18]], [[MUL19]]
// CHECK26-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD20]] to i32
// CHECK26-NEXT: store i32 [[CONV21]], i32* [[CONV1]], align 8
// CHECK26-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START3]], align 4
// CHECK26-NEXT: [[CONV22:%.*]] = sext i32 [[TMP22]] to i64
// CHECK26-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK26-NEXT: [[MUL23:%.*]] = mul i64 4, [[TMP23]]
// CHECK26-NEXT: [[ADD24:%.*]] = add i64 [[CONV22]], [[MUL23]]
// CHECK26-NEXT: [[CONV25:%.*]] = trunc i64 [[ADD24]] to i32
// CHECK26-NEXT: store i32 [[CONV25]], i32* [[CONV2]], align 8
// CHECK26-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK26: .omp.linear.pu.done:
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK26-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: ret i64 0
//
//
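// .omp_outlined..1 above covers linear clause lowering: the starting values of
// lin and a are saved into .linear.start slots, the step is computed once via
// _Z7get_valv(), each iteration materializes start + iv * step into the
// privates [[LIN4]] and [[A5]], and the .omp.linear.pu block (guarded by
// .omp.is_last) writes back start + 4 * step, i.e. the trip count times the
// step. A sketch of a construct with this shape -- reconstructed from the
// checks, not necessarily this test's exact source:
//
//   #pragma omp target parallel for linear(lin, a : get_val())
//   for (unsigned long long it = 2000; it >= 600; it -= 400) // 4 iterations
//     aa += 1;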
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146
// CHECK26-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[TMP0:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP0]], i32* [[CONV2]], align 4
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK26-NEXT: store i16 [[TMP2]], i16* [[CONV3]], align 2
// CHECK26-NEXT: [[TMP3:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i64 [[TMP1]], i64 [[TMP3]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]]) #[[ATTR1]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK26-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK26-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK26-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK26-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK26-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK26-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK26: cond.true:
// CHECK26-NEXT: br label [[COND_END:%.*]]
// CHECK26: cond.false:
// CHECK26-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: br label [[COND_END]]
// CHECK26: cond.end:
// CHECK26-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK26-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK26-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK26: omp.inner.for.cond:
// CHECK26-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK26-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK26: omp.inner.for.body:
// CHECK26-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK26-NEXT: [[CONV3:%.*]] = trunc i32 [[ADD]] to i16
// CHECK26-NEXT: store i16 [[CONV3]], i16* [[IT]], align 2
// CHECK26-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK26-NEXT: store i32 [[ADD4]], i32* [[CONV]], align 8
// CHECK26-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV5:%.*]] = sext i16 [[TMP9]] to i32
// CHECK26-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], 1
// CHECK26-NEXT: [[CONV7:%.*]] = trunc i32 [[ADD6]] to i16
// CHECK26-NEXT: store i16 [[CONV7]], i16* [[CONV1]], align 8
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK26: omp.body.continue:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK26: omp.inner.for.inc:
// CHECK26-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK26-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK26: omp.inner.for.end:
// CHECK26-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK26: omp.loop.exit:
// CHECK26-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK26-NEXT: ret void
//
//
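// .omp_outlined..2 is a plain static worksharing loop over a 16-bit induction
// variable: the inner loop runs iv = 0..3 and stores (short)(6 + 4 * iv) into
// [[IT]] while bumping the two captured scalars. A hypothetical construct of
// this shape, inferred from the checks:
//
//   #pragma omp target parallel for
//   for (short it = 6; it < 20; it += 4) { // 4 iterations: 6, 10, 14, 18
//     a += 1;
//     aa += 1;
//   }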
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170
// CHECK26-SAME: (i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK26-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK26-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK26-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK26-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK26-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK26-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK26-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK26-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK26-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK26-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK26-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK26-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK26-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV6:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP8]], i32* [[CONV6]], align 4
// CHECK26-NEXT: [[TMP9:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK26-NEXT: [[TMP10:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK26-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP10]], i32* [[CONV7]], align 4
// CHECK26-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, [10 x float]*, i64, float*, [5 x [10 x double]]*, i64, i64, double*, %struct.TT*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP9]], [10 x float]* [[TMP0]], i64 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i64 [[TMP4]], i64 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i64 [[TMP11]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i64 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 8 dereferenceable(400) [[C:%.*]], i64 [[VLA1:%.*]], i64 [[VLA3:%.*]], double* nonnull align 8 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 8 dereferenceable(16) [[D:%.*]], i64 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 8
// CHECK26-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 8
// CHECK26-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 8
// CHECK26-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR4:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 8
// CHECK26-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 8
// CHECK26-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK26-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 8
// CHECK26-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: store i64 [[VLA3]], i64* [[VLA_ADDR4]], align 8
// CHECK26-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 8
// CHECK26-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 8
// CHECK26-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 8
// CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: [[TMP5:%.*]] = load i64, i64* [[VLA_ADDR4]], align 8
// CHECK26-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 8
// CHECK26-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 8
// CHECK26-NEXT: [[CONV5:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32*
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK26-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK26-NEXT: [[TMP8:%.*]] = load i32, i32* [[CONV5]], align 8
// CHECK26-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK26-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK26-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK26: omp.dispatch.cond:
// CHECK26-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK26-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK26: cond.true:
// CHECK26-NEXT: br label [[COND_END:%.*]]
// CHECK26: cond.false:
// CHECK26-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: br label [[COND_END]]
// CHECK26: cond.end:
// CHECK26-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK26-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK26-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK26-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK26: omp.dispatch.body:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK26: omp.inner.for.cond:
// CHECK26-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK26-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK26: omp.inner.for.body:
// CHECK26-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK26-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK26-NEXT: [[CONV8:%.*]] = trunc i32 [[SUB]] to i8
// CHECK26-NEXT: store i8 [[CONV8]], i8* [[IT]], align 1
// CHECK26-NEXT: [[TMP19:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK26-NEXT: store i32 [[ADD]], i32* [[CONV]], align 8
// CHECK26-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i64 0, i64 2
// CHECK26-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK26-NEXT: [[CONV9:%.*]] = fpext float [[TMP20]] to double
// CHECK26-NEXT: [[ADD10:%.*]] = fadd double [[CONV9]], 1.000000e+00
// CHECK26-NEXT: [[CONV11:%.*]] = fptrunc double [[ADD10]] to float
// CHECK26-NEXT: store float [[CONV11]], float* [[ARRAYIDX]], align 4
// CHECK26-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds float, float* [[TMP2]], i64 3
// CHECK26-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX12]], align 4
// CHECK26-NEXT: [[CONV13:%.*]] = fpext float [[TMP21]] to double
// CHECK26-NEXT: [[ADD14:%.*]] = fadd double [[CONV13]], 1.000000e+00
// CHECK26-NEXT: [[CONV15:%.*]] = fptrunc double [[ADD14]] to float
// CHECK26-NEXT: store float [[CONV15]], float* [[ARRAYIDX12]], align 4
// CHECK26-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i64 0, i64 1
// CHECK26-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 2
// CHECK26-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX17]], align 8
// CHECK26-NEXT: [[ADD18:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK26-NEXT: store double [[ADD18]], double* [[ARRAYIDX17]], align 8
// CHECK26-NEXT: [[TMP23:%.*]] = mul nsw i64 1, [[TMP5]]
// CHECK26-NEXT: [[ARRAYIDX19:%.*]] = getelementptr inbounds double, double* [[TMP6]], i64 [[TMP23]]
// CHECK26-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX19]], i64 3
// CHECK26-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX20]], align 8
// CHECK26-NEXT: [[ADD21:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK26-NEXT: store double [[ADD21]], double* [[ARRAYIDX20]], align 8
// CHECK26-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK26-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 8
// CHECK26-NEXT: [[ADD22:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK26-NEXT: store i64 [[ADD22]], i64* [[X]], align 8
// CHECK26-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK26-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 8
// CHECK26-NEXT: [[CONV23:%.*]] = sext i8 [[TMP26]] to i32
// CHECK26-NEXT: [[ADD24:%.*]] = add nsw i32 [[CONV23]], 1
// CHECK26-NEXT: [[CONV25:%.*]] = trunc i32 [[ADD24]] to i8
// CHECK26-NEXT: store i8 [[CONV25]], i8* [[Y]], align 8
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK26: omp.body.continue:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK26: omp.inner.for.inc:
// CHECK26-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK26-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_IV]], align 4
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK26: omp.inner.for.end:
// CHECK26-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK26: omp.dispatch.inc:
// CHECK26-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK26-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK26-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK26-NEXT: store i32 [[ADD27]], i32* [[DOTOMP_LB]], align 4
// CHECK26-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK26-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK26-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_UB]], align 4
// CHECK26-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK26: omp.dispatch.end:
// CHECK26-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK26-NEXT: ret void
//
//
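// .omp_outlined..3 uses schedule kind 33 (statically chunked) with the chunk
// size taken from the captured expression [[TMP8]], so the usual inner loop is
// wrapped in an omp.dispatch.cond/omp.dispatch.inc loop that advances lb/ub by
// the stride between chunks. A sketch of a construct with this shape (the
// chunk expression n is hypothetical, as is the exact loop body):
//
//   #pragma omp target parallel for schedule(static, n)
//   for (unsigned char it = 122; it >= 97; it--) { // 26 iterations
//     a += 1;
//     b[2] += 1.0;
//     bn[3] += 1.0;
//     c[1][2] += 1.0;
//     cn[1][3] += 1.0;
//     d.X += 1;
//     d.Y += 1;
//   }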
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224
// CHECK26-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK26-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AAA_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK26-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV3:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP1]], i32* [[CONV3]], align 4
// CHECK26-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV4:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK26-NEXT: store i16 [[TMP3]], i16* [[CONV4]], align 2
// CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK26-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV2]], align 8
// CHECK26-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8*
// CHECK26-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1
// CHECK26-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], i64 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AAA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK26-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AAA]], i64* [[AAA_ADDR]], align 8
// CHECK26-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[AAA_ADDR]] to i8*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: ret void
//
//
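// Note that .omp_outlined..4 above only re-materializes the captured scalars
// and the array pointer and then returns: no worksharing runtime calls and no
// loop blocks are emitted, consistent with a parallel-for whose iteration
// space is provably empty at compile time.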
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242
// CHECK26-SAME: (%struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK26-NEXT: [[B_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK26-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK26-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK26-NEXT: [[TMP4:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4
// CHECK26-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i64 [[B:%.*]], i64 [[VLA:%.*]], i64 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 8
// CHECK26-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8
// CHECK26-NEXT: store i64 [[B]], i64* [[B_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 8
// CHECK26-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[B_ADDR]] to i32*
// CHECK26-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 8
// CHECK26-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK26-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK26-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK26-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK26-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK26-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK26: cond.true:
// CHECK26-NEXT: br label [[COND_END:%.*]]
// CHECK26: cond.false:
// CHECK26-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: br label [[COND_END]]
// CHECK26: cond.end:
// CHECK26-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK26-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK26-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK26: omp.inner.for.cond:
// CHECK26-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK26-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK26: omp.inner.for.body:
// CHECK26-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK26-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK26-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK26-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV4:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK26-NEXT: [[ADD:%.*]] = fadd double [[CONV4]], 1.500000e+00
// CHECK26-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK26-NEXT: store double [[ADD]], double* [[A]], align 8
// CHECK26-NEXT: [[A5:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK26-NEXT: [[TMP13:%.*]] = load double, double* [[A5]], align 8
// CHECK26-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK26-NEXT: store double [[INC]], double* [[A5]], align 8
// CHECK26-NEXT: [[CONV6:%.*]] = fptosi double [[INC]] to i16
// CHECK26-NEXT: [[TMP14:%.*]] = mul nsw i64 1, [[TMP2]]
// CHECK26-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i64 [[TMP14]]
// CHECK26-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1
// CHECK26-NEXT: store i16 [[CONV6]], i16* [[ARRAYIDX7]], align 2
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK26: omp.body.continue:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK26: omp.inner.for.inc:
// CHECK26-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: [[ADD8:%.*]] = add i64 [[TMP15]], 1
// CHECK26-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK26: omp.inner.for.end:
// CHECK26-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK26: omp.loop.exit:
// CHECK26-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK26-NEXT: ret void
//
//
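// The __ZN2S12r1Ei_l242 pair above lowers a target region inside a member
// function: `this` is forwarded as the first outlined argument, b is widened
// to double and stored into the member a, and the incremented value is
// truncated to short and stored through the VLA of shorts at c[1][1]. A
// hypothetical body with this shape, inferred from the checks:
//
//   #pragma omp target parallel for
//   for (unsigned long long it = 2000; it >= 600; it -= 400) {
//     this->a = (double)b + 1.5;
//     c[1][1] = ++this->a;
//   }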
// CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207
// CHECK26-SAME: (i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK26-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_CASTED:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[TMP1:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[CONV2:%.*]] = bitcast i64* [[A_CASTED]] to i32*
// CHECK26-NEXT: store i32 [[TMP1]], i32* [[CONV2]], align 4
// CHECK26-NEXT: [[TMP2:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK26-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16*
// CHECK26-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2
// CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8
// CHECK26-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK26-NEXT: ret void
//
//
// CHECK26-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK26-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[A:%.*]], i64 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK26-NEXT: entry:
// CHECK26-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK26-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[AA_ADDR:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK26-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK26-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK26-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK26-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK26-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8
// CHECK26-NEXT: store i64 [[AA]], i64* [[AA_ADDR]], align 8
// CHECK26-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: [[CONV:%.*]] = bitcast i64* [[A_ADDR]] to i32*
// CHECK26-NEXT: [[CONV1:%.*]] = bitcast i64* [[AA_ADDR]] to i16*
// CHECK26-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8
// CHECK26-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK26-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK26-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK26-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK26-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK26-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK26-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK26-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK26: cond.true:
// CHECK26-NEXT: br label [[COND_END:%.*]]
// CHECK26: cond.false:
// CHECK26-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: br label [[COND_END]]
// CHECK26: cond.end:
// CHECK26-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK26-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK26-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK26: omp.inner.for.cond:
// CHECK26-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK26-NEXT: [[CMP2:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK26-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK26: omp.inner.for.body:
// CHECK26-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK26-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK26-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK26-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8
// CHECK26-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK26-NEXT: store i32 [[ADD3]], i32* [[CONV]], align 8
// CHECK26-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV1]], align 8
// CHECK26-NEXT: [[CONV4:%.*]] = sext i16 [[TMP10]] to i32
// CHECK26-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK26-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK26-NEXT: store i16 [[CONV6]], i16* [[CONV1]], align 8
// CHECK26-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 2
// CHECK26-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK26-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK26-NEXT: store i32 [[ADD7]], i32* [[ARRAYIDX]], align 4
// CHECK26-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK26: omp.body.continue:
// CHECK26-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK26: omp.inner.for.inc:
// CHECK26-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: [[ADD8:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK26-NEXT: store i64 [[ADD8]], i64* [[DOTOMP_IV]], align 8
// CHECK26-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK26: omp.inner.for.end:
// CHECK26-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK26: omp.loop.exit:
// CHECK26-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK26-NEXT: ret void
//
//
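// .omp_outlined..6 belongs to the ftemplate<int> instantiation: a signed
// 64-bit induction variable with i = -10 + 3 * iv and upper bound 6, i.e.
// seven iterations. A hypothetical construct of this shape:
//
//   #pragma omp target parallel for
//   for (long long i = -10; i < 10; i += 3) { // 7 iterations: -10, -7, ..., 8
//     a += 1;
//     aa += 1;
//     b[2] += 1;
//   }
//
// From here on, the CHECK27 prefix repeats the same groups of checks for the
// 32-bit i386 device compilation: pointers are stored with align 4 and the
// scalar captures arrive as i32 instead of i64, but the control flow otherwise
// mirrors the CHECK26 output.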
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK27-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK27-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK27-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK27-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK27-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK27: cond.true:
// CHECK27-NEXT: br label [[COND_END:%.*]]
// CHECK27: cond.false:
// CHECK27-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: br label [[COND_END]]
// CHECK27: cond.end:
// CHECK27-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK27-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK27-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK27: omp.inner.for.cond:
// CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK27-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK27: omp.inner.for.body:
// CHECK27-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK27-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK27-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK27-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK27-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK27: .cancel.exit:
// CHECK27-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK27: .cancel.continue:
// CHECK27-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK27-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK27-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK27: .cancel.exit2:
// CHECK27-NEXT: br label [[CANCEL_EXIT]]
// CHECK27: .cancel.continue3:
// CHECK27-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK27: omp.body.continue:
// CHECK27-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK27: omp.inner.for.inc:
// CHECK27-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK27-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK27: omp.inner.for.end:
// CHECK27-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK27: omp.loop.exit:
// CHECK27-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK27-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK27: cancel.cont:
// CHECK27-NEXT: ret void
// CHECK27: cancel.exit:
// CHECK27-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK27-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138
// CHECK27-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK27-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR1]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK27-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[LIN2:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[A3:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK27-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK27-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK27-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK27-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK27-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK27-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK27-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK27-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK27-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK27: cond.true:
// CHECK27-NEXT: br label [[COND_END:%.*]]
// CHECK27: cond.false:
// CHECK27-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: br label [[COND_END]]
// CHECK27: cond.end:
// CHECK27-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK27-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK27-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK27: omp.inner.for.cond:
// CHECK27-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]]
// CHECK27-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK27: omp.inner.for.body:
// CHECK27-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400
// CHECK27-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK27-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK27-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK27-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
// CHECK27-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK27-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]]
// CHECK27-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]]
// CHECK27-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32
// CHECK27-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4
// CHECK27-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK27-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64
// CHECK27-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK27-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]]
// CHECK27-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]]
// CHECK27-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32
// CHECK27-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4
// CHECK27-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32
// CHECK27-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1
// CHECK27-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16
// CHECK27-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4
// CHECK27-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK27: omp.body.continue:
// CHECK27-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK27: omp.inner.for.inc:
// CHECK27-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1
// CHECK27-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK27: omp.inner.for.end:
// CHECK27-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK27: omp.loop.exit:
// CHECK27-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]])
// CHECK27-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK27-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
// CHECK27-NEXT: br i1 [[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK27: .omp.linear.pu:
// CHECK27-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK27-NEXT: [[CONV16:%.*]] = sext i32 [[TMP20]] to i64
// CHECK27-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK27-NEXT: [[MUL17:%.*]] = mul i64 4, [[TMP21]]
// CHECK27-NEXT: [[ADD18:%.*]] = add i64 [[CONV16]], [[MUL17]]
// CHECK27-NEXT: [[CONV19:%.*]] = trunc i64 [[ADD18]] to i32
// CHECK27-NEXT: store i32 [[CONV19]], i32* [[LIN_ADDR]], align 4
// CHECK27-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK27-NEXT: [[CONV20:%.*]] = sext i32 [[TMP22]] to i64
// CHECK27-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8
// CHECK27-NEXT: [[MUL21:%.*]] = mul i64 4, [[TMP23]]
// CHECK27-NEXT: [[ADD22:%.*]] = add i64 [[CONV20]], [[MUL21]]
// CHECK27-NEXT: [[CONV23:%.*]] = trunc i64 [[ADD22]] to i32
// CHECK27-NEXT: store i32 [[CONV23]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK27: .omp.linear.pu.done:
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@_Z7get_valv
// CHECK27-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: ret i64 0
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146
// CHECK27-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK27-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2
// CHECK27-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR1]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[TMP:%.*]] = alloca i16, align 2
// CHECK27-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[IT:%.*]] = alloca i16, align 2
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK27-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK27-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK27-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK27-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3
// CHECK27-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK27: cond.true:
// CHECK27-NEXT: br label [[COND_END:%.*]]
// CHECK27: cond.false:
// CHECK27-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: br label [[COND_END]]
// CHECK27: cond.end:
// CHECK27-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK27-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK27-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK27: omp.inner.for.cond:
// CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK27-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK27: omp.inner.for.body:
// CHECK27-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4
// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]]
// CHECK27-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16
// CHECK27-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2
// CHECK27-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK27-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32
// CHECK27-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1
// CHECK27-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
// CHECK27-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4
// CHECK27-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK27: omp.body.continue:
// CHECK27-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK27: omp.inner.for.inc:
// CHECK27-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK27-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK27: omp.inner.for.end:
// CHECK27-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK27: omp.loop.exit:
// CHECK27-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170
// CHECK27-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK27-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK27-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK27-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK27-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK27-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK27-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK27-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK27-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK27-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK27-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK27-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK27-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK27-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4
// CHECK27-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4
// CHECK27-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4
// CHECK27-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4
// CHECK27-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4
// CHECK27-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[TMP:%.*]] = alloca i8, align 1
// CHECK27-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[IT:%.*]] = alloca i8, align 1
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4
// CHECK27-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4
// CHECK27-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4
// CHECK27-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4
// CHECK27-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4
// CHECK27-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4
// CHECK27-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4
// CHECK27-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK27-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK27-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK27-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK27-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK27-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]])
// CHECK27-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK27: omp.dispatch.cond:
// CHECK27-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25
// CHECK27-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK27: cond.true:
// CHECK27-NEXT: br label [[COND_END:%.*]]
// CHECK27: cond.false:
// CHECK27-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: br label [[COND_END]]
// CHECK27: cond.end:
// CHECK27-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK27-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK27-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK27-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK27: omp.dispatch.body:
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK27: omp.inner.for.cond:
// CHECK27-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK27-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK27: omp.inner.for.body:
// CHECK27-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK27-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]]
// CHECK27-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8
// CHECK27-NEXT: store i8 [[CONV]], i8* [[IT]], align 1
// CHECK27-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK27-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2
// CHECK27-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK27-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double
// CHECK27-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00
// CHECK27-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float
// CHECK27-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4
// CHECK27-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3
// CHECK27-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4
// CHECK27-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double
// CHECK27-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00
// CHECK27-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float
// CHECK27-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4
// CHECK27-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1
// CHECK27-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2
// CHECK27-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8
// CHECK27-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00
// CHECK27-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8
// CHECK27-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]]
// CHECK27-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]]
// CHECK27-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3
// CHECK27-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8
// CHECK27-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00
// CHECK27-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8
// CHECK27-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0
// CHECK27-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4
// CHECK27-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1
// CHECK27-NEXT: store i64 [[ADD20]], i64* [[X]], align 4
// CHECK27-NEXT: [[Y:%.*]] = getelementptr inbounds [[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1
// CHECK27-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4
// CHECK27-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32
// CHECK27-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1
// CHECK27-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8
// CHECK27-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4
// CHECK27-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK27: omp.body.continue:
// CHECK27-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK27: omp.inner.for.inc:
// CHECK27-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1
// CHECK27-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK27: omp.inner.for.end:
// CHECK27-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK27: omp.dispatch.inc:
// CHECK27-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK27-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK27-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]]
// CHECK27-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4
// CHECK27-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK27-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]]
// CHECK27-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4
// CHECK27-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK27: omp.dispatch.end:
// CHECK27-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224
// CHECK27-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK27-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK27-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK27-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK27-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4
// CHECK27-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8*
// CHECK27-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1
// CHECK27-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK27-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4
// CHECK27-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8*
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242
// CHECK27-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK27-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK27-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK27-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4
// CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4
// CHECK27-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK27-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4
// CHECK27-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4
// CHECK27-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4
// CHECK27-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK27-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK27-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK27-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK27-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK27-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3
// CHECK27-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK27: cond.true:
// CHECK27-NEXT: br label [[COND_END:%.*]]
// CHECK27: cond.false:
// CHECK27-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: br label [[COND_END]]
// CHECK27: cond.end:
// CHECK27-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK27-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK27-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK27: omp.inner.for.cond:
// CHECK27-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]]
// CHECK27-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK27: omp.inner.for.body:
// CHECK27-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400
// CHECK27-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]]
// CHECK27-NEXT: store i64 [[SUB]], i64* [[IT]], align 8
// CHECK27-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double
// CHECK27-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00
// CHECK27-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK27-NEXT: store double [[ADD]], double* [[A]], align 4
// CHECK27-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0
// CHECK27-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4
// CHECK27-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00
// CHECK27-NEXT: store double [[INC]], double* [[A4]], align 4
// CHECK27-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16
// CHECK27-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]]
// CHECK27-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]]
// CHECK27-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1
// CHECK27-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2
// CHECK27-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK27: omp.body.continue:
// CHECK27-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK27: omp.inner.for.inc:
// CHECK27-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1
// CHECK27-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK27: omp.inner.for.end:
// CHECK27-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK27: omp.loop.exit:
// CHECK27-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207
// CHECK27-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK27-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK27-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK27-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2
// CHECK27-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK27-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]])
// CHECK27-NEXT: ret void
//
//
// CHECK27-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK27-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK27-NEXT: entry:
// CHECK27-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK27-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK27-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK27-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK27-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK27-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK27-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK27-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK27-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK27-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4
// CHECK27-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK27-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK27-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK27-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK27-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK27-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK27-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6
// CHECK27-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK27: cond.true:
// CHECK27-NEXT: br label [[COND_END:%.*]]
// CHECK27: cond.false:
// CHECK27-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: br label [[COND_END]]
// CHECK27: cond.end:
// CHECK27-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK27-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK27-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK27: omp.inner.for.cond:
// CHECK27-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK27-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]]
// CHECK27-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK27: omp.inner.for.body:
// CHECK27-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3
// CHECK27-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]]
// CHECK27-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK27-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK27-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4
// CHECK27-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK27-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32
// CHECK27-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1
// CHECK27-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16
// CHECK27-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4
// CHECK27-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2
// CHECK27-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK27-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK27-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4
// CHECK27-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK27: omp.body.continue:
// CHECK27-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK27: omp.inner.for.inc:
// CHECK27-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1
// CHECK27-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8
// CHECK27-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK27: omp.inner.for.end:
// CHECK27-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK27: omp.loop.exit:
// CHECK27-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK27-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103
// CHECK28-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK28-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK28-NEXT: store i32 5, i32* [[DOTOMP_UB]], align 4
// CHECK28-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK28-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK28-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK28-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK28-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 5
// CHECK28-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK28: cond.true:
// CHECK28-NEXT: br label [[COND_END:%.*]]
// CHECK28: cond.false:
// CHECK28-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK28-NEXT: br label [[COND_END]]
// CHECK28: cond.end:
// CHECK28-NEXT: [[COND:%.*]] = phi i32 [ 5, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK28-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK28-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK28: omp.inner.for.cond:
// CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK28-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK28-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK28-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK28: omp.inner.for.body:
// CHECK28-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK28-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 5
// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 3, [[MUL]]
// CHECK28-NEXT: store i32 [[ADD]], i32* [[I]], align 4
// CHECK28-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_cancel(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK28-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
// CHECK28-NEXT: br i1 [[TMP9]], label [[DOTCANCEL_EXIT:%.*]], label [[DOTCANCEL_CONTINUE:%.*]]
// CHECK28: .cancel.exit:
// CHECK28-NEXT: br label [[CANCEL_EXIT:%.*]]
// CHECK28: .cancel.continue:
// CHECK28-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_cancellationpoint(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 2)
// CHECK28-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK28-NEXT: br i1 [[TMP11]], label [[DOTCANCEL_EXIT2:%.*]], label [[DOTCANCEL_CONTINUE3:%.*]]
// CHECK28: .cancel.exit2:
// CHECK28-NEXT: br label [[CANCEL_EXIT]]
// CHECK28: .cancel.continue3:
// CHECK28-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK28: omp.body.continue:
// CHECK28-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK28: omp.inner.for.inc:
// CHECK28-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK28-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK28-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4
// CHECK28-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK28: omp.inner.for.end:
// CHECK28-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK28: omp.loop.exit:
// CHECK28-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK28-NEXT: br label [[CANCEL_CONT:%.*]]
// CHECK28: cancel.cont:
// CHECK28-NEXT: ret void
// CHECK28: cancel.exit:
// CHECK28-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
// CHECK28-NEXT: br label [[CANCEL_CONT]]
//
//
// CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l138
// CHECK28-SAME: (i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR0]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[LIN_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK28-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK28-NEXT: [[TMP0:%.*]] = load i16, i16* [[CONV]], align 4
// CHECK28-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16*
// CHECK28-NEXT: store i16 [[TMP0]], i16* [[CONV1]], align 2
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[AA_CASTED]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK28-NEXT: store i32 [[TMP2]], i32* [[LIN_CASTED]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = load i32, i32* [[LIN_CASTED]], align 4
// CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[TMP4]], i32* [[A_CASTED]], align 4
// CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[A_CASTED]], align 4
// CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]], i32 [[TMP5]])
// CHECK28-NEXT: ret void
//
//
// CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[AA:%.*]], i32 [[LIN:%.*]], i32 [[A:%.*]]) #[[ATTR1]] {
// CHECK28-NEXT: entry:
// CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[LIN_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[TMP:%.*]] = alloca i64, align 4
// CHECK28-NEXT: [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[DOTLINEAR_STEP:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[IT:%.*]] = alloca i64, align 8
// CHECK28-NEXT: [[LIN2:%.*]] = alloca i32, align 4
// CHECK28-NEXT: [[A3:%.*]] = alloca i32, align 4
// CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4
// CHECK28-NEXT: store i32 [[LIN]], i32* [[LIN_ADDR]], align 4
// CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4
// CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16*
// CHECK28-NEXT: [[TMP0:%.*]] = load i32, i32* [[LIN_ADDR]], align 4
// CHECK28-NEXT: store i32 [[TMP0]], i32* [[DOTLINEAR_START]], align 4
// CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4
// CHECK28-NEXT: store i32 [[TMP1]], i32* [[DOTLINEAR_START1]], align 4
// CHECK28-NEXT: [[CALL:%.*]] = call i64 @_Z7get_valv() #[[ATTR5:[0-9]+]]
// CHECK28-NEXT: store i64 [[CALL]], i64* [[DOTLINEAR_STEP]], align 8
// CHECK28-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK28-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8
// CHECK28-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK28-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK28-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK28-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK28-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP3]])
// CHECK28-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK28-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK28-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP4]], 3
// CHECK28-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK28: cond.true:
// CHECK28-NEXT: br label [[COND_END:%.*]]
19117 // CHECK28: cond.false: 19118 // CHECK28-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 19119 // CHECK28-NEXT: br label [[COND_END]] 19120 // CHECK28: cond.end: 19121 // CHECK28-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 19122 // CHECK28-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 19123 // CHECK28-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 19124 // CHECK28-NEXT: store i64 [[TMP6]], i64* [[DOTOMP_IV]], align 8 19125 // CHECK28-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 19126 // CHECK28: omp.inner.for.cond: 19127 // CHECK28-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 19128 // CHECK28-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 19129 // CHECK28-NEXT: [[CMP4:%.*]] = icmp ule i64 [[TMP7]], [[TMP8]] 19130 // CHECK28-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 19131 // CHECK28: omp.inner.for.body: 19132 // CHECK28-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 19133 // CHECK28-NEXT: [[MUL:%.*]] = mul i64 [[TMP9]], 400 19134 // CHECK28-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 19135 // CHECK28-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 19136 // CHECK28-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 19137 // CHECK28-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64 19138 // CHECK28-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 19139 // CHECK28-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 19140 // CHECK28-NEXT: [[MUL6:%.*]] = mul i64 [[TMP11]], [[TMP12]] 19141 // CHECK28-NEXT: [[ADD:%.*]] = add i64 [[CONV5]], [[MUL6]] 19142 // CHECK28-NEXT: [[CONV7:%.*]] = trunc i64 [[ADD]] to i32 19143 // CHECK28-NEXT: store i32 [[CONV7]], i32* [[LIN2]], align 4 19144 // CHECK28-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4 19145 // CHECK28-NEXT: [[CONV8:%.*]] = sext i32 [[TMP13]] to i64 19146 // CHECK28-NEXT: [[TMP14:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 19147 // CHECK28-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 19148 // CHECK28-NEXT: [[MUL9:%.*]] = mul i64 [[TMP14]], [[TMP15]] 19149 // CHECK28-NEXT: [[ADD10:%.*]] = add i64 [[CONV8]], [[MUL9]] 19150 // CHECK28-NEXT: [[CONV11:%.*]] = trunc i64 [[ADD10]] to i32 19151 // CHECK28-NEXT: store i32 [[CONV11]], i32* [[A3]], align 4 19152 // CHECK28-NEXT: [[TMP16:%.*]] = load i16, i16* [[CONV]], align 4 19153 // CHECK28-NEXT: [[CONV12:%.*]] = sext i16 [[TMP16]] to i32 19154 // CHECK28-NEXT: [[ADD13:%.*]] = add nsw i32 [[CONV12]], 1 19155 // CHECK28-NEXT: [[CONV14:%.*]] = trunc i32 [[ADD13]] to i16 19156 // CHECK28-NEXT: store i16 [[CONV14]], i16* [[CONV]], align 4 19157 // CHECK28-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 19158 // CHECK28: omp.body.continue: 19159 // CHECK28-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 19160 // CHECK28: omp.inner.for.inc: 19161 // CHECK28-NEXT: [[TMP17:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 19162 // CHECK28-NEXT: [[ADD15:%.*]] = add i64 [[TMP17]], 1 19163 // CHECK28-NEXT: store i64 [[ADD15]], i64* [[DOTOMP_IV]], align 8 19164 // CHECK28-NEXT: br label [[OMP_INNER_FOR_COND]] 19165 // CHECK28: omp.inner.for.end: 19166 // CHECK28-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 19167 // CHECK28: omp.loop.exit: 19168 // CHECK28-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 19169 // CHECK28-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 19170 // CHECK28-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0 19171 // CHECK28-NEXT: br i1 
[[TMP19]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]] 19172 // CHECK28: .omp.linear.pu: 19173 // CHECK28-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4 19174 // CHECK28-NEXT: [[CONV16:%.*]] = sext i32 [[TMP20]] to i64 19175 // CHECK28-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 19176 // CHECK28-NEXT: [[MUL17:%.*]] = mul i64 4, [[TMP21]] 19177 // CHECK28-NEXT: [[ADD18:%.*]] = add i64 [[CONV16]], [[MUL17]] 19178 // CHECK28-NEXT: [[CONV19:%.*]] = trunc i64 [[ADD18]] to i32 19179 // CHECK28-NEXT: store i32 [[CONV19]], i32* [[LIN_ADDR]], align 4 19180 // CHECK28-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4 19181 // CHECK28-NEXT: [[CONV20:%.*]] = sext i32 [[TMP22]] to i64 19182 // CHECK28-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTLINEAR_STEP]], align 8 19183 // CHECK28-NEXT: [[MUL21:%.*]] = mul i64 4, [[TMP23]] 19184 // CHECK28-NEXT: [[ADD22:%.*]] = add i64 [[CONV20]], [[MUL21]] 19185 // CHECK28-NEXT: [[CONV23:%.*]] = trunc i64 [[ADD22]] to i32 19186 // CHECK28-NEXT: store i32 [[CONV23]], i32* [[A_ADDR]], align 4 19187 // CHECK28-NEXT: br label [[DOTOMP_LINEAR_PU_DONE]] 19188 // CHECK28: .omp.linear.pu.done: 19189 // CHECK28-NEXT: ret void 19190 // 19191 // 19192 // CHECK28-LABEL: define {{[^@]+}}@_Z7get_valv 19193 // CHECK28-SAME: () #[[ATTR3:[0-9]+]] { 19194 // CHECK28-NEXT: entry: 19195 // CHECK28-NEXT: ret i64 0 19196 // 19197 // 19198 // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l146 19199 // CHECK28-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR0]] { 19200 // CHECK28-NEXT: entry: 19201 // CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19202 // CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 19203 // CHECK28-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 19204 // CHECK28-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 19205 // CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19206 // CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 19207 // CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 19208 // CHECK28-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 19209 // CHECK28-NEXT: store i32 [[TMP0]], i32* [[A_CASTED]], align 4 19210 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_CASTED]], align 4 19211 // CHECK28-NEXT: [[TMP2:%.*]] = load i16, i16* [[CONV]], align 4 19212 // CHECK28-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 19213 // CHECK28-NEXT: store i16 [[TMP2]], i16* [[CONV1]], align 2 19214 // CHECK28-NEXT: [[TMP3:%.*]] = load i32, i32* [[AA_CASTED]], align 4 19215 // CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32 [[TMP3]]) 19216 // CHECK28-NEXT: ret void 19217 // 19218 // 19219 // CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..2 19220 // CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]]) #[[ATTR1]] { 19221 // CHECK28-NEXT: entry: 19222 // CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 19223 // CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 19224 // CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19225 // CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 19226 // CHECK28-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 19227 // CHECK28-NEXT: [[TMP:%.*]] = alloca i16, align 2 19228 // CHECK28-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 19229 // CHECK28-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 19230 // CHECK28-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 19231 // CHECK28-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 19232 // CHECK28-NEXT: [[IT:%.*]] = alloca i16, align 2 19233 // CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 19234 // CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 19235 // CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19236 // CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 19237 // CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 19238 // CHECK28-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 19239 // CHECK28-NEXT: store i32 3, i32* [[DOTOMP_UB]], align 4 19240 // CHECK28-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 19241 // CHECK28-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 19242 // CHECK28-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 19243 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 19244 // CHECK28-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 19245 // CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19246 // CHECK28-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 3 19247 // CHECK28-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 19248 // CHECK28: cond.true: 19249 // CHECK28-NEXT: br label [[COND_END:%.*]] 19250 // CHECK28: cond.false: 19251 // CHECK28-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19252 // CHECK28-NEXT: br label [[COND_END]] 19253 // CHECK28: cond.end: 19254 // CHECK28-NEXT: [[COND:%.*]] = phi i32 [ 3, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 19255 // CHECK28-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 19256 // CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 19257 // CHECK28-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 19258 // CHECK28-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 19259 // CHECK28: omp.inner.for.cond: 19260 // CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 19261 // CHECK28-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19262 // CHECK28-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 19263 // CHECK28-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 19264 // CHECK28: omp.inner.for.body: 19265 // CHECK28-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 19266 // 
CHECK28-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 4 19267 // CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 6, [[MUL]] 19268 // CHECK28-NEXT: [[CONV2:%.*]] = trunc i32 [[ADD]] to i16 19269 // CHECK28-NEXT: store i16 [[CONV2]], i16* [[IT]], align 2 19270 // CHECK28-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 19271 // CHECK28-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1 19272 // CHECK28-NEXT: store i32 [[ADD3]], i32* [[A_ADDR]], align 4 19273 // CHECK28-NEXT: [[TMP9:%.*]] = load i16, i16* [[CONV]], align 4 19274 // CHECK28-NEXT: [[CONV4:%.*]] = sext i16 [[TMP9]] to i32 19275 // CHECK28-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV4]], 1 19276 // CHECK28-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16 19277 // CHECK28-NEXT: store i16 [[CONV6]], i16* [[CONV]], align 4 19278 // CHECK28-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 19279 // CHECK28: omp.body.continue: 19280 // CHECK28-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 19281 // CHECK28: omp.inner.for.inc: 19282 // CHECK28-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 19283 // CHECK28-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP10]], 1 19284 // CHECK28-NEXT: store i32 [[ADD7]], i32* [[DOTOMP_IV]], align 4 19285 // CHECK28-NEXT: br label [[OMP_INNER_FOR_COND]] 19286 // CHECK28: omp.inner.for.end: 19287 // CHECK28-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 19288 // CHECK28: omp.loop.exit: 19289 // CHECK28-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 19290 // CHECK28-NEXT: ret void 19291 // 19292 // 19293 // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170 19294 // CHECK28-SAME: (i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR0]] { 19295 // CHECK28-NEXT: entry: 19296 // CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19297 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 19298 // CHECK28-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 19299 // CHECK28-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 19300 // CHECK28-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 19301 // CHECK28-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 19302 // CHECK28-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 19303 // CHECK28-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 19304 // CHECK28-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 19305 // CHECK28-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 19306 // CHECK28-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 19307 // CHECK28-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 19308 // CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19309 // CHECK28-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 19310 // CHECK28-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 19311 // CHECK28-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 19312 // CHECK28-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 19313 // CHECK28-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 19314 // CHECK28-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 19315 // CHECK28-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 19316 // CHECK28-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 
4 19317 // CHECK28-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 19318 // CHECK28-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 19319 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 19320 // CHECK28-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 19321 // CHECK28-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 19322 // CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 19323 // CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 19324 // CHECK28-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 19325 // CHECK28-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 19326 // CHECK28-NEXT: [[TMP8:%.*]] = load i32, i32* [[A_ADDR]], align 4 19327 // CHECK28-NEXT: store i32 [[TMP8]], i32* [[A_CASTED]], align 4 19328 // CHECK28-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_CASTED]], align 4 19329 // CHECK28-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 19330 // CHECK28-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 19331 // CHECK28-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 19332 // CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 10, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x float]*, i32, float*, [5 x [10 x double]]*, i32, i32, double*, %struct.TT*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP9]], [10 x float]* [[TMP0]], i32 [[TMP1]], float* [[TMP2]], [5 x [10 x double]]* [[TMP3]], i32 [[TMP4]], i32 [[TMP5]], double* [[TMP6]], %struct.TT* [[TMP7]], i32 [[TMP11]]) 19333 // CHECK28-NEXT: ret void 19334 // 19335 // 19336 // CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..3 19337 // CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], [10 x float]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32 [[VLA:%.*]], float* nonnull align 4 dereferenceable(4) [[BN:%.*]], [5 x [10 x double]]* nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[VLA1:%.*]], i32 [[VLA3:%.*]], double* nonnull align 4 dereferenceable(8) [[CN:%.*]], %struct.TT* nonnull align 4 dereferenceable(12) [[D:%.*]], i32 [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 19338 // CHECK28-NEXT: entry: 19339 // CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 19340 // CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 19341 // CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19342 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x float]*, align 4 19343 // CHECK28-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 19344 // CHECK28-NEXT: [[BN_ADDR:%.*]] = alloca float*, align 4 19345 // CHECK28-NEXT: [[C_ADDR:%.*]] = alloca [5 x [10 x double]]*, align 4 19346 // CHECK28-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 19347 // CHECK28-NEXT: [[VLA_ADDR4:%.*]] = alloca i32, align 4 19348 // CHECK28-NEXT: [[CN_ADDR:%.*]] = alloca double*, align 4 19349 // CHECK28-NEXT: [[D_ADDR:%.*]] = alloca %struct.TT*, align 4 19350 // CHECK28-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 19351 // CHECK28-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 19352 // CHECK28-NEXT: [[TMP:%.*]] = alloca i8, align 1 19353 // CHECK28-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 19354 // CHECK28-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 19355 // CHECK28-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 19356 // 
CHECK28-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 19357 // CHECK28-NEXT: [[IT:%.*]] = alloca i8, align 1 19358 // CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 19359 // CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 19360 // CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19361 // CHECK28-NEXT: store [10 x float]* [[B]], [10 x float]** [[B_ADDR]], align 4 19362 // CHECK28-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 19363 // CHECK28-NEXT: store float* [[BN]], float** [[BN_ADDR]], align 4 19364 // CHECK28-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[C_ADDR]], align 4 19365 // CHECK28-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 19366 // CHECK28-NEXT: store i32 [[VLA3]], i32* [[VLA_ADDR4]], align 4 19367 // CHECK28-NEXT: store double* [[CN]], double** [[CN_ADDR]], align 4 19368 // CHECK28-NEXT: store %struct.TT* [[D]], %struct.TT** [[D_ADDR]], align 4 19369 // CHECK28-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 19370 // CHECK28-NEXT: [[TMP0:%.*]] = load [10 x float]*, [10 x float]** [[B_ADDR]], align 4 19371 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 19372 // CHECK28-NEXT: [[TMP2:%.*]] = load float*, float** [[BN_ADDR]], align 4 19373 // CHECK28-NEXT: [[TMP3:%.*]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[C_ADDR]], align 4 19374 // CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 19375 // CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[VLA_ADDR4]], align 4 19376 // CHECK28-NEXT: [[TMP6:%.*]] = load double*, double** [[CN_ADDR]], align 4 19377 // CHECK28-NEXT: [[TMP7:%.*]] = load %struct.TT*, %struct.TT** [[D_ADDR]], align 4 19378 // CHECK28-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 19379 // CHECK28-NEXT: store i32 25, i32* [[DOTOMP_UB]], align 4 19380 // CHECK28-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 19381 // CHECK28-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 19382 // CHECK28-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 19383 // CHECK28-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 19384 // CHECK28-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 19385 // CHECK28-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP8]]) 19386 // CHECK28-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 19387 // CHECK28: omp.dispatch.cond: 19388 // CHECK28-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19389 // CHECK28-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP11]], 25 19390 // CHECK28-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 19391 // CHECK28: cond.true: 19392 // CHECK28-NEXT: br label [[COND_END:%.*]] 19393 // CHECK28: cond.false: 19394 // CHECK28-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19395 // CHECK28-NEXT: br label [[COND_END]] 19396 // CHECK28: cond.end: 19397 // CHECK28-NEXT: [[COND:%.*]] = phi i32 [ 25, [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 19398 // CHECK28-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 19399 // CHECK28-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 19400 // CHECK28-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 19401 // CHECK28-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 19402 // CHECK28-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 
19403 // CHECK28-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 19404 // CHECK28-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 19405 // CHECK28: omp.dispatch.body: 19406 // CHECK28-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 19407 // CHECK28: omp.inner.for.cond: 19408 // CHECK28-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 19409 // CHECK28-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19410 // CHECK28-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]] 19411 // CHECK28-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 19412 // CHECK28: omp.inner.for.body: 19413 // CHECK28-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 19414 // CHECK28-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1 19415 // CHECK28-NEXT: [[SUB:%.*]] = sub nsw i32 122, [[MUL]] 19416 // CHECK28-NEXT: [[CONV:%.*]] = trunc i32 [[SUB]] to i8 19417 // CHECK28-NEXT: store i8 [[CONV]], i8* [[IT]], align 1 19418 // CHECK28-NEXT: [[TMP19:%.*]] = load i32, i32* [[A_ADDR]], align 4 19419 // CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], 1 19420 // CHECK28-NEXT: store i32 [[ADD]], i32* [[A_ADDR]], align 4 19421 // CHECK28-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x float], [10 x float]* [[TMP0]], i32 0, i32 2 19422 // CHECK28-NEXT: [[TMP20:%.*]] = load float, float* [[ARRAYIDX]], align 4 19423 // CHECK28-NEXT: [[CONV7:%.*]] = fpext float [[TMP20]] to double 19424 // CHECK28-NEXT: [[ADD8:%.*]] = fadd double [[CONV7]], 1.000000e+00 19425 // CHECK28-NEXT: [[CONV9:%.*]] = fptrunc double [[ADD8]] to float 19426 // CHECK28-NEXT: store float [[CONV9]], float* [[ARRAYIDX]], align 4 19427 // CHECK28-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds float, float* [[TMP2]], i32 3 19428 // CHECK28-NEXT: [[TMP21:%.*]] = load float, float* [[ARRAYIDX10]], align 4 19429 // CHECK28-NEXT: [[CONV11:%.*]] = fpext float [[TMP21]] to double 19430 // CHECK28-NEXT: [[ADD12:%.*]] = fadd double [[CONV11]], 1.000000e+00 19431 // CHECK28-NEXT: [[CONV13:%.*]] = fptrunc double [[ADD12]] to float 19432 // CHECK28-NEXT: store float [[CONV13]], float* [[ARRAYIDX10]], align 4 19433 // CHECK28-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[TMP3]], i32 0, i32 1 19434 // CHECK28-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX14]], i32 0, i32 2 19435 // CHECK28-NEXT: [[TMP22:%.*]] = load double, double* [[ARRAYIDX15]], align 8 19436 // CHECK28-NEXT: [[ADD16:%.*]] = fadd double [[TMP22]], 1.000000e+00 19437 // CHECK28-NEXT: store double [[ADD16]], double* [[ARRAYIDX15]], align 8 19438 // CHECK28-NEXT: [[TMP23:%.*]] = mul nsw i32 1, [[TMP5]] 19439 // CHECK28-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds double, double* [[TMP6]], i32 [[TMP23]] 19440 // CHECK28-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYIDX17]], i32 3 19441 // CHECK28-NEXT: [[TMP24:%.*]] = load double, double* [[ARRAYIDX18]], align 8 19442 // CHECK28-NEXT: [[ADD19:%.*]] = fadd double [[TMP24]], 1.000000e+00 19443 // CHECK28-NEXT: store double [[ADD19]], double* [[ARRAYIDX18]], align 8 19444 // CHECK28-NEXT: [[X:%.*]] = getelementptr inbounds [[STRUCT_TT:%.*]], %struct.TT* [[TMP7]], i32 0, i32 0 19445 // CHECK28-NEXT: [[TMP25:%.*]] = load i64, i64* [[X]], align 4 19446 // CHECK28-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP25]], 1 19447 // CHECK28-NEXT: store i64 [[ADD20]], i64* [[X]], align 4 19448 // CHECK28-NEXT: [[Y:%.*]] = getelementptr inbounds 
[[STRUCT_TT]], %struct.TT* [[TMP7]], i32 0, i32 1 19449 // CHECK28-NEXT: [[TMP26:%.*]] = load i8, i8* [[Y]], align 4 19450 // CHECK28-NEXT: [[CONV21:%.*]] = sext i8 [[TMP26]] to i32 19451 // CHECK28-NEXT: [[ADD22:%.*]] = add nsw i32 [[CONV21]], 1 19452 // CHECK28-NEXT: [[CONV23:%.*]] = trunc i32 [[ADD22]] to i8 19453 // CHECK28-NEXT: store i8 [[CONV23]], i8* [[Y]], align 4 19454 // CHECK28-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 19455 // CHECK28: omp.body.continue: 19456 // CHECK28-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 19457 // CHECK28: omp.inner.for.inc: 19458 // CHECK28-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 19459 // CHECK28-NEXT: [[ADD24:%.*]] = add nsw i32 [[TMP27]], 1 19460 // CHECK28-NEXT: store i32 [[ADD24]], i32* [[DOTOMP_IV]], align 4 19461 // CHECK28-NEXT: br label [[OMP_INNER_FOR_COND]] 19462 // CHECK28: omp.inner.for.end: 19463 // CHECK28-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 19464 // CHECK28: omp.dispatch.inc: 19465 // CHECK28-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 19466 // CHECK28-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 19467 // CHECK28-NEXT: [[ADD25:%.*]] = add nsw i32 [[TMP28]], [[TMP29]] 19468 // CHECK28-NEXT: store i32 [[ADD25]], i32* [[DOTOMP_LB]], align 4 19469 // CHECK28-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 19470 // CHECK28-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 19471 // CHECK28-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 19472 // CHECK28-NEXT: store i32 [[ADD26]], i32* [[DOTOMP_UB]], align 4 19473 // CHECK28-NEXT: br label [[OMP_DISPATCH_COND]] 19474 // CHECK28: omp.dispatch.end: 19475 // CHECK28-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]]) 19476 // CHECK28-NEXT: ret void 19477 // 19478 // 19479 // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224 19480 // CHECK28-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 19481 // CHECK28-NEXT: entry: 19482 // CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19483 // CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 19484 // CHECK28-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 19485 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 19486 // CHECK28-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 19487 // CHECK28-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 19488 // CHECK28-NEXT: [[AAA_CASTED:%.*]] = alloca i32, align 4 19489 // CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19490 // CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 19491 // CHECK28-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 19492 // CHECK28-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 19493 // CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 19494 // CHECK28-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 19495 // CHECK28-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 19496 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 19497 // CHECK28-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 19498 // CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 19499 // CHECK28-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 19500 // CHECK28-NEXT: [[CONV2:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 19501 // CHECK28-NEXT: store i16 [[TMP3]], i16* [[CONV2]], align 2 19502 // CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 19503 // 
CHECK28-NEXT: [[TMP5:%.*]] = load i8, i8* [[CONV1]], align 4 19504 // CHECK28-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* 19505 // CHECK28-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 19506 // CHECK28-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 19507 // CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) 19508 // CHECK28-NEXT: ret void 19509 // 19510 // 19511 // CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..4 19512 // CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], i32 [[AAA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 19513 // CHECK28-NEXT: entry: 19514 // CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 19515 // CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 19516 // CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19517 // CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 19518 // CHECK28-NEXT: [[AAA_ADDR:%.*]] = alloca i32, align 4 19519 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 19520 // CHECK28-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 19521 // CHECK28-NEXT: [[TMP:%.*]] = alloca i32, align 4 19522 // CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 19523 // CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 19524 // CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19525 // CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 19526 // CHECK28-NEXT: store i32 [[AAA]], i32* [[AAA_ADDR]], align 4 19527 // CHECK28-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 19528 // CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 19529 // CHECK28-NEXT: [[CONV1:%.*]] = bitcast i32* [[AAA_ADDR]] to i8* 19530 // CHECK28-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 19531 // CHECK28-NEXT: ret void 19532 // 19533 // 19534 // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242 19535 // CHECK28-SAME: (%struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR0]] { 19536 // CHECK28-NEXT: entry: 19537 // CHECK28-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 19538 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 19539 // CHECK28-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 19540 // CHECK28-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 19541 // CHECK28-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 19542 // CHECK28-NEXT: [[B_CASTED:%.*]] = alloca i32, align 4 19543 // CHECK28-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 19544 // CHECK28-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 19545 // CHECK28-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 19546 // CHECK28-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 19547 // CHECK28-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 19548 // CHECK28-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 19549 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 19550 // CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 19551 // CHECK28-NEXT: [[TMP3:%.*]] = load i16*, 
i16** [[C_ADDR]], align 4 19552 // CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 19553 // CHECK28-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 19554 // CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 19555 // CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) 19556 // CHECK28-NEXT: ret void 19557 // 19558 // 19559 // CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..5 19560 // CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], %struct.S1* [[THIS:%.*]], i32 [[B:%.*]], i32 [[VLA:%.*]], i32 [[VLA1:%.*]], i16* nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR1]] { 19561 // CHECK28-NEXT: entry: 19562 // CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 19563 // CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 19564 // CHECK28-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S1*, align 4 19565 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca i32, align 4 19566 // CHECK28-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 19567 // CHECK28-NEXT: [[VLA_ADDR2:%.*]] = alloca i32, align 4 19568 // CHECK28-NEXT: [[C_ADDR:%.*]] = alloca i16*, align 4 19569 // CHECK28-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 19570 // CHECK28-NEXT: [[TMP:%.*]] = alloca i64, align 4 19571 // CHECK28-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 19572 // CHECK28-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 19573 // CHECK28-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 19574 // CHECK28-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 19575 // CHECK28-NEXT: [[IT:%.*]] = alloca i64, align 8 19576 // CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 19577 // CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 19578 // CHECK28-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 19579 // CHECK28-NEXT: store i32 [[B]], i32* [[B_ADDR]], align 4 19580 // CHECK28-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 19581 // CHECK28-NEXT: store i32 [[VLA1]], i32* [[VLA_ADDR2]], align 4 19582 // CHECK28-NEXT: store i16* [[C]], i16** [[C_ADDR]], align 4 19583 // CHECK28-NEXT: [[TMP0:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 19584 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 19585 // CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[VLA_ADDR2]], align 4 19586 // CHECK28-NEXT: [[TMP3:%.*]] = load i16*, i16** [[C_ADDR]], align 4 19587 // CHECK28-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 19588 // CHECK28-NEXT: store i64 3, i64* [[DOTOMP_UB]], align 8 19589 // CHECK28-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 19590 // CHECK28-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 19591 // CHECK28-NEXT: [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 19592 // CHECK28-NEXT: [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4 19593 // CHECK28-NEXT: call void @__kmpc_for_static_init_8u(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 19594 // CHECK28-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 19595 // CHECK28-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP6]], 3 19596 // CHECK28-NEXT: br i1 [[CMP]], label 
[[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 19597 // CHECK28: cond.true: 19598 // CHECK28-NEXT: br label [[COND_END:%.*]] 19599 // CHECK28: cond.false: 19600 // CHECK28-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 19601 // CHECK28-NEXT: br label [[COND_END]] 19602 // CHECK28: cond.end: 19603 // CHECK28-NEXT: [[COND:%.*]] = phi i64 [ 3, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ] 19604 // CHECK28-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 19605 // CHECK28-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 19606 // CHECK28-NEXT: store i64 [[TMP8]], i64* [[DOTOMP_IV]], align 8 19607 // CHECK28-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 19608 // CHECK28: omp.inner.for.cond: 19609 // CHECK28-NEXT: [[TMP9:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 19610 // CHECK28-NEXT: [[TMP10:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 19611 // CHECK28-NEXT: [[CMP3:%.*]] = icmp ule i64 [[TMP9]], [[TMP10]] 19612 // CHECK28-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 19613 // CHECK28: omp.inner.for.body: 19614 // CHECK28-NEXT: [[TMP11:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 19615 // CHECK28-NEXT: [[MUL:%.*]] = mul i64 [[TMP11]], 400 19616 // CHECK28-NEXT: [[SUB:%.*]] = sub i64 2000, [[MUL]] 19617 // CHECK28-NEXT: store i64 [[SUB]], i64* [[IT]], align 8 19618 // CHECK28-NEXT: [[TMP12:%.*]] = load i32, i32* [[B_ADDR]], align 4 19619 // CHECK28-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP12]] to double 19620 // CHECK28-NEXT: [[ADD:%.*]] = fadd double [[CONV]], 1.500000e+00 19621 // CHECK28-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], %struct.S1* [[TMP0]], i32 0, i32 0 19622 // CHECK28-NEXT: store double [[ADD]], double* [[A]], align 4 19623 // CHECK28-NEXT: [[A4:%.*]] = getelementptr inbounds [[STRUCT_S1]], %struct.S1* [[TMP0]], i32 0, i32 0 19624 // CHECK28-NEXT: [[TMP13:%.*]] = load double, double* [[A4]], align 4 19625 // CHECK28-NEXT: [[INC:%.*]] = fadd double [[TMP13]], 1.000000e+00 19626 // CHECK28-NEXT: store double [[INC]], double* [[A4]], align 4 19627 // CHECK28-NEXT: [[CONV5:%.*]] = fptosi double [[INC]] to i16 19628 // CHECK28-NEXT: [[TMP14:%.*]] = mul nsw i32 1, [[TMP2]] 19629 // CHECK28-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[TMP3]], i32 [[TMP14]] 19630 // CHECK28-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 19631 // CHECK28-NEXT: store i16 [[CONV5]], i16* [[ARRAYIDX6]], align 2 19632 // CHECK28-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 19633 // CHECK28: omp.body.continue: 19634 // CHECK28-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 19635 // CHECK28: omp.inner.for.inc: 19636 // CHECK28-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 19637 // CHECK28-NEXT: [[ADD7:%.*]] = add i64 [[TMP15]], 1 19638 // CHECK28-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8 19639 // CHECK28-NEXT: br label [[OMP_INNER_FOR_COND]] 19640 // CHECK28: omp.inner.for.end: 19641 // CHECK28-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 19642 // CHECK28: omp.loop.exit: 19643 // CHECK28-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]]) 19644 // CHECK28-NEXT: ret void 19645 // 19646 // 19647 // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207 19648 // CHECK28-SAME: (i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] { 19649 // CHECK28-NEXT: entry: 19650 // CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19651 // CHECK28-NEXT: [[AA_ADDR:%.*]] = 
alloca i32, align 4 19652 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 19653 // CHECK28-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4 19654 // CHECK28-NEXT: [[AA_CASTED:%.*]] = alloca i32, align 4 19655 // CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19656 // CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 19657 // CHECK28-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 19658 // CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 19659 // CHECK28-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 19660 // CHECK28-NEXT: [[TMP1:%.*]] = load i32, i32* [[A_ADDR]], align 4 19661 // CHECK28-NEXT: store i32 [[TMP1]], i32* [[A_CASTED]], align 4 19662 // CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[A_CASTED]], align 4 19663 // CHECK28-NEXT: [[TMP3:%.*]] = load i16, i16* [[CONV]], align 4 19664 // CHECK28-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* 19665 // CHECK28-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 19666 // CHECK28-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 19667 // CHECK28-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) 19668 // CHECK28-NEXT: ret void 19669 // 19670 // 19671 // CHECK28-LABEL: define {{[^@]+}}@.omp_outlined..6 19672 // CHECK28-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i32 [[A:%.*]], i32 [[AA:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] { 19673 // CHECK28-NEXT: entry: 19674 // CHECK28-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 19675 // CHECK28-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 19676 // CHECK28-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 19677 // CHECK28-NEXT: [[AA_ADDR:%.*]] = alloca i32, align 4 19678 // CHECK28-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 4 19679 // CHECK28-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 19680 // CHECK28-NEXT: [[TMP:%.*]] = alloca i64, align 4 19681 // CHECK28-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 19682 // CHECK28-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 19683 // CHECK28-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 19684 // CHECK28-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 19685 // CHECK28-NEXT: [[I:%.*]] = alloca i64, align 8 19686 // CHECK28-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 19687 // CHECK28-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 19688 // CHECK28-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 19689 // CHECK28-NEXT: store i32 [[AA]], i32* [[AA_ADDR]], align 4 19690 // CHECK28-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 4 19691 // CHECK28-NEXT: [[CONV:%.*]] = bitcast i32* [[AA_ADDR]] to i16* 19692 // CHECK28-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 4 19693 // CHECK28-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 19694 // CHECK28-NEXT: store i64 6, i64* [[DOTOMP_UB]], align 8 19695 // CHECK28-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 19696 // CHECK28-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 19697 // CHECK28-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 19698 // CHECK28-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 19699 // CHECK28-NEXT: call void 
@__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 19700 // CHECK28-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 19701 // CHECK28-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP3]], 6 19702 // CHECK28-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 19703 // CHECK28: cond.true: 19704 // CHECK28-NEXT: br label [[COND_END:%.*]] 19705 // CHECK28: cond.false: 19706 // CHECK28-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 19707 // CHECK28-NEXT: br label [[COND_END]] 19708 // CHECK28: cond.end: 19709 // CHECK28-NEXT: [[COND:%.*]] = phi i64 [ 6, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 19710 // CHECK28-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 19711 // CHECK28-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 19712 // CHECK28-NEXT: store i64 [[TMP5]], i64* [[DOTOMP_IV]], align 8 19713 // CHECK28-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 19714 // CHECK28: omp.inner.for.cond: 19715 // CHECK28-NEXT: [[TMP6:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 19716 // CHECK28-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 19717 // CHECK28-NEXT: [[CMP1:%.*]] = icmp sle i64 [[TMP6]], [[TMP7]] 19718 // CHECK28-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 19719 // CHECK28: omp.inner.for.body: 19720 // CHECK28-NEXT: [[TMP8:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 19721 // CHECK28-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP8]], 3 19722 // CHECK28-NEXT: [[ADD:%.*]] = add nsw i64 -10, [[MUL]] 19723 // CHECK28-NEXT: store i64 [[ADD]], i64* [[I]], align 8 19724 // CHECK28-NEXT: [[TMP9:%.*]] = load i32, i32* [[A_ADDR]], align 4 19725 // CHECK28-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1 19726 // CHECK28-NEXT: store i32 [[ADD2]], i32* [[A_ADDR]], align 4 19727 // CHECK28-NEXT: [[TMP10:%.*]] = load i16, i16* [[CONV]], align 4 19728 // CHECK28-NEXT: [[CONV3:%.*]] = sext i16 [[TMP10]] to i32 19729 // CHECK28-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], 1 19730 // CHECK28-NEXT: [[CONV5:%.*]] = trunc i32 [[ADD4]] to i16 19731 // CHECK28-NEXT: store i16 [[CONV5]], i16* [[CONV]], align 4 19732 // CHECK28-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 2 19733 // CHECK28-NEXT: [[TMP11:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 19734 // CHECK28-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP11]], 1 19735 // CHECK28-NEXT: store i32 [[ADD6]], i32* [[ARRAYIDX]], align 4 19736 // CHECK28-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 19737 // CHECK28: omp.body.continue: 19738 // CHECK28-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 19739 // CHECK28: omp.inner.for.inc: 19740 // CHECK28-NEXT: [[TMP12:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 19741 // CHECK28-NEXT: [[ADD7:%.*]] = add nsw i64 [[TMP12]], 1 19742 // CHECK28-NEXT: store i64 [[ADD7]], i64* [[DOTOMP_IV]], align 8 19743 // CHECK28-NEXT: br label [[OMP_INNER_FOR_COND]] 19744 // CHECK28: omp.inner.for.end: 19745 // CHECK28-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 19746 // CHECK28: omp.loop.exit: 19747 // CHECK28-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 19748 // CHECK28-NEXT: ret void 19749 // 19750