1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ 2 // expected-no-diagnostics 3 #ifndef HEADER 4 #define HEADER 5 6 // Test host codegen. 7 // RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1 8 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s 9 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2 10 // RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3 11 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s 12 // RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4 13 14 // RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5 15 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s 16 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6 17 // RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7 18 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s 19 // RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8 20 #ifdef CK1 21 22 template <typename T, int X, long long Y> 23 struct SS{ 24 T a[X]; 25 float b; 26 int foo(void) { 27 28 #pragma omp target 29 #pragma omp teams distribute simd 30 for(int i = 0; i < X; i++) { 31 a[i] = (T)0; 32 } 33 #pragma omp target 34 #pragma omp teams distribute simd dist_schedule(static) 35 for(int i = 0; i < X; i++) { 36 a[i] = (T)0; 37 } 38 #pragma omp target 39 #pragma omp teams distribute simd dist_schedule(static, X/2) 40 for(int i = 0; i < X; i++) { 41 a[i] = (T)0; 42 } 43 44 45 46 47 48 49 return a[0]; 50 } 51 }; 52 53 int teams_template_struct(void) { 54 SS<int, 123, 456> V; 55 return V.foo(); 56 57 } 58 #endif // CK1 59 60 // Test host codegen. 61 // RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9 62 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s 63 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10 64 // RUN: %clang_cc1 -DCK2 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11 65 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s 66 // RUN: %clang_cc1 -DCK2 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12 67 68 // RUN: %clang_cc1 -DCK2 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13 69 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s 70 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14 71 // RUN: %clang_cc1 -DCK2 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK15 72 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s 73 // RUN: %clang_cc1 -DCK2 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK16 74 #ifdef CK2 75 76 template <typename T, int n> 77 int tmain(T argc) { 78 T a[n]; 79 #pragma omp target 80 #pragma omp teams distribute simd 81 for(int i = 0; i < n; i++) { 82 a[i] = (T)0; 83 } 84 #pragma omp target 85 #pragma omp teams distribute simd dist_schedule(static) 86 for(int i = 0; i < n; i++) { 87 a[i] = (T)0; 88 } 89 #pragma omp target 90 #pragma omp teams distribute simd dist_schedule(static, n) 91 for(int i = 0; i < n; i++) { 92 a[i] = (T)0; 93 } 94 return 0; 95 } 96 97 int main (int argc, char **argv) { 98 int n = 100; 99 int a[n]; 100 #pragma omp target 101 #pragma omp teams distribute simd 102 for(int i = 0; i < n; i++) { 103 a[i] = 0; 104 } 105 #pragma omp target 106 #pragma omp teams distribute simd dist_schedule(static) 107 for(int i = 0; i < n; i++) { 108 a[i] = 0; 109 } 110 #pragma omp target 111 #pragma omp teams distribute simd dist_schedule(static, n) 112 for(int i = 0; i < n; i++) { 113 a[i] = 0; 114 } 115 return tmain<int, 10>(argc); 116 } 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 #endif // CK2 133 #endif // #ifndef HEADER 134 // CHECK1-LABEL: define {{[^@]+}}@_Z21teams_template_structv 135 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] { 136 // CHECK1-NEXT: entry: 137 // CHECK1-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4 138 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef [[V]]) 139 // CHECK1-NEXT: ret i32 [[CALL]] 140 // 141 // 142 // CHECK1-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv 143 // CHECK1-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR0]] comdat align 2 { 144 // CHECK1-NEXT: entry: 145 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 146 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 147 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 148 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 149 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 150 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8 151 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8 152 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8 153 // CHECK1-NEXT: [[_TMP6:%.*]] = alloca i32, align 4 154 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 8 155 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 8 156 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 8 157 // CHECK1-NEXT: [[_TMP13:%.*]] = alloca i32, align 4 158 // CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 159 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 160 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0 161 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 162 // CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS** 163 // CHECK1-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8 164 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 165 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]** 166 // CHECK1-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 8 167 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 168 // CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8 169 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 170 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 171 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) 172 // CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 173 // CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 174 // CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 175 // CHECK1: omp_offload.failed: 176 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] 177 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 178 // CHECK1: omp_offload.cont: 179 // CHECK1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 180 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 181 // CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS** 182 // CHECK1-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 8 183 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 184 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]** 185 // CHECK1-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 8 186 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0 187 // CHECK1-NEXT: store i8* null, i8** [[TMP13]], align 8 188 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 189 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 190 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 123) 191 // CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 192 // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 193 // CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] 194 // CHECK1: omp_offload.failed7: 195 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33(%struct.SS* [[THIS1]]) #[[ATTR2]] 196 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT8]] 197 // CHECK1: omp_offload.cont8: 198 // CHECK1-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 199 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 200 // CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS** 201 // CHECK1-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 8 202 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 203 // CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]** 204 // CHECK1-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 8 205 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0 206 // CHECK1-NEXT: store i8* null, i8** [[TMP22]], align 8 207 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 208 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 209 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 123) 210 // CHECK1-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 211 // CHECK1-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 212 // CHECK1-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] 213 // CHECK1: omp_offload.failed14: 214 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38(%struct.SS* [[THIS1]]) #[[ATTR2]] 215 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT15]] 216 // CHECK1: omp_offload.cont15: 217 // CHECK1-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 218 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A16]], i64 0, i64 0 219 // CHECK1-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 220 // CHECK1-NEXT: ret i32 [[TMP27]] 221 // 222 // 223 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 224 // CHECK1-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] { 225 // CHECK1-NEXT: entry: 226 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 227 // CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 228 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 229 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 230 // CHECK1-NEXT: ret void 231 // 232 // 233 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 234 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 235 // CHECK1-NEXT: entry: 236 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 237 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 238 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 239 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 240 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 241 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 242 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 243 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 244 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 245 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 246 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 247 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 248 // CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 249 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 250 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 251 // CHECK1-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 252 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 253 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 254 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 255 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 256 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 257 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 258 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 259 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 260 // CHECK1: cond.true: 261 // CHECK1-NEXT: br label [[COND_END:%.*]] 262 // CHECK1: cond.false: 263 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 264 // CHECK1-NEXT: br label [[COND_END]] 265 // CHECK1: cond.end: 266 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 267 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 268 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 269 // CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 270 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 271 // CHECK1: omp.inner.for.cond: 272 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 273 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6 274 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 275 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 276 // CHECK1: omp.inner.for.body: 277 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 278 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 279 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 280 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6 281 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 282 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6 283 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 284 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]] 285 // CHECK1-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !6 286 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 287 // CHECK1: omp.body.continue: 288 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 289 // CHECK1: omp.inner.for.inc: 290 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 291 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 292 // CHECK1-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 293 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] 294 // CHECK1: omp.inner.for.end: 295 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 296 // CHECK1: omp.loop.exit: 297 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 298 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 299 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 300 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 301 // CHECK1: .omp.final.then: 302 // CHECK1-NEXT: store i32 123, i32* [[I]], align 4 303 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 304 // CHECK1: .omp.final.done: 305 // CHECK1-NEXT: ret void 306 // 307 // 308 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33 309 // CHECK1-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 310 // CHECK1-NEXT: entry: 311 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 312 // CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 313 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 314 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 315 // CHECK1-NEXT: ret void 316 // 317 // 318 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1 319 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 320 // CHECK1-NEXT: entry: 321 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 322 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 323 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 324 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 325 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 326 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 327 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 328 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 329 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 330 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 331 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 332 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 333 // CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 334 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 335 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 336 // CHECK1-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 337 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 338 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 339 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 340 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 341 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 342 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 343 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 344 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 345 // CHECK1: cond.true: 346 // CHECK1-NEXT: br label [[COND_END:%.*]] 347 // CHECK1: cond.false: 348 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 349 // CHECK1-NEXT: br label [[COND_END]] 350 // CHECK1: cond.end: 351 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 352 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 353 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 354 // CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 355 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 356 // CHECK1: omp.inner.for.cond: 357 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 358 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12 359 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 360 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 361 // CHECK1: omp.inner.for.body: 362 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 363 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 364 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 365 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12 366 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 367 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12 368 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 369 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]] 370 // CHECK1-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !12 371 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 372 // CHECK1: omp.body.continue: 373 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 374 // CHECK1: omp.inner.for.inc: 375 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 376 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 377 // CHECK1-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 378 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] 379 // CHECK1: omp.inner.for.end: 380 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 381 // CHECK1: omp.loop.exit: 382 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 383 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 384 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 385 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 386 // CHECK1: .omp.final.then: 387 // CHECK1-NEXT: store i32 123, i32* [[I]], align 4 388 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 389 // CHECK1: .omp.final.done: 390 // CHECK1-NEXT: ret void 391 // 392 // 393 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38 394 // CHECK1-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 395 // CHECK1-NEXT: entry: 396 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 397 // CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 398 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 399 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 400 // CHECK1-NEXT: ret void 401 // 402 // 403 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4 404 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 405 // CHECK1-NEXT: entry: 406 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 407 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 408 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 409 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 410 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 411 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 412 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 413 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 414 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 415 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 416 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 417 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 418 // CHECK1-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 419 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 420 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 421 // CHECK1-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 422 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 423 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 424 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 425 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 426 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61) 427 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 428 // CHECK1: omp.dispatch.cond: 429 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 430 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 431 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 432 // CHECK1: cond.true: 433 // CHECK1-NEXT: br label [[COND_END:%.*]] 434 // CHECK1: cond.false: 435 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 436 // CHECK1-NEXT: br label [[COND_END]] 437 // CHECK1: cond.end: 438 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 439 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 440 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 441 // CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 442 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 443 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 444 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 445 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 446 // CHECK1: omp.dispatch.body: 447 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 448 // CHECK1: omp.inner.for.cond: 449 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 450 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15 451 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 452 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 453 // CHECK1: omp.inner.for.body: 454 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 455 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 456 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 457 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !15 458 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 459 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !15 460 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 461 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]] 462 // CHECK1-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !15 463 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 464 // CHECK1: omp.body.continue: 465 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 466 // CHECK1: omp.inner.for.inc: 467 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 468 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 469 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 470 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 471 // CHECK1: omp.inner.for.end: 472 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 473 // CHECK1: omp.dispatch.inc: 474 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 475 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 476 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 477 // CHECK1-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4 478 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 479 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 480 // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] 481 // CHECK1-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4 482 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] 483 // CHECK1: omp.dispatch.end: 484 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 485 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 486 // CHECK1-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 487 // CHECK1-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 488 // CHECK1: .omp.final.then: 489 // CHECK1-NEXT: store i32 123, i32* [[I]], align 4 490 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 491 // CHECK1: .omp.final.done: 492 // CHECK1-NEXT: ret void 493 // 494 // 495 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 496 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] { 497 // CHECK1-NEXT: entry: 498 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1) 499 // CHECK1-NEXT: ret void 500 // 501 // 502 // CHECK2-LABEL: define {{[^@]+}}@_Z21teams_template_structv 503 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] { 504 // CHECK2-NEXT: entry: 505 // CHECK2-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4 506 // CHECK2-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef [[V]]) 507 // CHECK2-NEXT: ret i32 [[CALL]] 508 // 509 // 510 // CHECK2-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv 511 // CHECK2-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR0]] comdat align 2 { 512 // CHECK2-NEXT: entry: 513 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 514 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 515 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 516 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 517 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 518 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8 519 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8 520 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8 521 // CHECK2-NEXT: [[_TMP6:%.*]] = alloca i32, align 4 522 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 8 523 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 8 524 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 8 525 // CHECK2-NEXT: [[_TMP13:%.*]] = alloca i32, align 4 526 // CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 527 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 528 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0 529 // CHECK2-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 530 // CHECK2-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS** 531 // CHECK2-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 8 532 // CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 533 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]** 534 // CHECK2-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 8 535 // CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 536 // CHECK2-NEXT: store i8* null, i8** [[TMP4]], align 8 537 // CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 538 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 539 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) 540 // CHECK2-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 541 // CHECK2-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 542 // CHECK2-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 543 // CHECK2: omp_offload.failed: 544 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] 545 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 546 // CHECK2: omp_offload.cont: 547 // CHECK2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 548 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 549 // CHECK2-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS** 550 // CHECK2-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 8 551 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 552 // CHECK2-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]** 553 // CHECK2-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 8 554 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0 555 // CHECK2-NEXT: store i8* null, i8** [[TMP13]], align 8 556 // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 557 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 558 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 123) 559 // CHECK2-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 560 // CHECK2-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 561 // CHECK2-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] 562 // CHECK2: omp_offload.failed7: 563 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33(%struct.SS* [[THIS1]]) #[[ATTR2]] 564 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT8]] 565 // CHECK2: omp_offload.cont8: 566 // CHECK2-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 567 // CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 568 // CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS** 569 // CHECK2-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 8 570 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 571 // CHECK2-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]** 572 // CHECK2-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 8 573 // CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i64 0, i64 0 574 // CHECK2-NEXT: store i8* null, i8** [[TMP22]], align 8 575 // CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 576 // CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 577 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 123) 578 // CHECK2-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 579 // CHECK2-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 580 // CHECK2-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] 581 // CHECK2: omp_offload.failed14: 582 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38(%struct.SS* [[THIS1]]) #[[ATTR2]] 583 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT15]] 584 // CHECK2: omp_offload.cont15: 585 // CHECK2-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 586 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A16]], i64 0, i64 0 587 // CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 588 // CHECK2-NEXT: ret i32 [[TMP27]] 589 // 590 // 591 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 592 // CHECK2-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] { 593 // CHECK2-NEXT: entry: 594 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 595 // CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 596 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 597 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 598 // CHECK2-NEXT: ret void 599 // 600 // 601 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined. 602 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 603 // CHECK2-NEXT: entry: 604 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 605 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 606 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 607 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 608 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 609 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 610 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 611 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 612 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 613 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 614 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 615 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 616 // CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 617 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 618 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 619 // CHECK2-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 620 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 621 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 622 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 623 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 624 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 625 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 626 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 627 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 628 // CHECK2: cond.true: 629 // CHECK2-NEXT: br label [[COND_END:%.*]] 630 // CHECK2: cond.false: 631 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 632 // CHECK2-NEXT: br label [[COND_END]] 633 // CHECK2: cond.end: 634 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 635 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 636 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 637 // CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 638 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 639 // CHECK2: omp.inner.for.cond: 640 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 641 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6 642 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 643 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 644 // CHECK2: omp.inner.for.body: 645 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 646 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 647 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 648 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6 649 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 650 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6 651 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 652 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]] 653 // CHECK2-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !6 654 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 655 // CHECK2: omp.body.continue: 656 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 657 // CHECK2: omp.inner.for.inc: 658 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 659 // CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 660 // CHECK2-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 661 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] 662 // CHECK2: omp.inner.for.end: 663 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 664 // CHECK2: omp.loop.exit: 665 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 666 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 667 // CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 668 // CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 669 // CHECK2: .omp.final.then: 670 // CHECK2-NEXT: store i32 123, i32* [[I]], align 4 671 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 672 // CHECK2: .omp.final.done: 673 // CHECK2-NEXT: ret void 674 // 675 // 676 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33 677 // CHECK2-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 678 // CHECK2-NEXT: entry: 679 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 680 // CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 681 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 682 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 683 // CHECK2-NEXT: ret void 684 // 685 // 686 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1 687 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 688 // CHECK2-NEXT: entry: 689 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 690 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 691 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 692 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 693 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 694 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 695 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 696 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 697 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 698 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 699 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 700 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 701 // CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 702 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 703 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 704 // CHECK2-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 705 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 706 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 707 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 708 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 709 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 710 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 711 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 712 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 713 // CHECK2: cond.true: 714 // CHECK2-NEXT: br label [[COND_END:%.*]] 715 // CHECK2: cond.false: 716 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 717 // CHECK2-NEXT: br label [[COND_END]] 718 // CHECK2: cond.end: 719 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 720 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 721 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 722 // CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 723 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 724 // CHECK2: omp.inner.for.cond: 725 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 726 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12 727 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 728 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 729 // CHECK2: omp.inner.for.body: 730 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 731 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 732 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 733 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12 734 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 735 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12 736 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 737 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]] 738 // CHECK2-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !12 739 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 740 // CHECK2: omp.body.continue: 741 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 742 // CHECK2: omp.inner.for.inc: 743 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 744 // CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 745 // CHECK2-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 746 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] 747 // CHECK2: omp.inner.for.end: 748 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 749 // CHECK2: omp.loop.exit: 750 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 751 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 752 // CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 753 // CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 754 // CHECK2: .omp.final.then: 755 // CHECK2-NEXT: store i32 123, i32* [[I]], align 4 756 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 757 // CHECK2: .omp.final.done: 758 // CHECK2-NEXT: ret void 759 // 760 // 761 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38 762 // CHECK2-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 763 // CHECK2-NEXT: entry: 764 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 765 // CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 766 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 767 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 768 // CHECK2-NEXT: ret void 769 // 770 // 771 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4 772 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 773 // CHECK2-NEXT: entry: 774 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 775 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 776 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 777 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 778 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 779 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 780 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 781 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 782 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 783 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 784 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 785 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 786 // CHECK2-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 787 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 788 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 789 // CHECK2-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 790 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 791 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 792 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 793 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 794 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61) 795 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 796 // CHECK2: omp.dispatch.cond: 797 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 798 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 799 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 800 // CHECK2: cond.true: 801 // CHECK2-NEXT: br label [[COND_END:%.*]] 802 // CHECK2: cond.false: 803 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 804 // CHECK2-NEXT: br label [[COND_END]] 805 // CHECK2: cond.end: 806 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 807 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 808 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 809 // CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 810 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 811 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 812 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 813 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 814 // CHECK2: omp.dispatch.body: 815 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 816 // CHECK2: omp.inner.for.cond: 817 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 818 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15 819 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 820 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 821 // CHECK2: omp.inner.for.body: 822 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 823 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 824 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 825 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !15 826 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 827 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !15 828 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 829 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]] 830 // CHECK2-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !15 831 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 832 // CHECK2: omp.body.continue: 833 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 834 // CHECK2: omp.inner.for.inc: 835 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 836 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 837 // CHECK2-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 838 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 839 // CHECK2: omp.inner.for.end: 840 // CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 841 // CHECK2: omp.dispatch.inc: 842 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 843 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 844 // CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 845 // CHECK2-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4 846 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 847 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 848 // CHECK2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] 849 // CHECK2-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4 850 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND]] 851 // CHECK2: omp.dispatch.end: 852 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 853 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 854 // CHECK2-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 855 // CHECK2-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 856 // CHECK2: .omp.final.then: 857 // CHECK2-NEXT: store i32 123, i32* [[I]], align 4 858 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 859 // CHECK2: .omp.final.done: 860 // CHECK2-NEXT: ret void 861 // 862 // 863 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 864 // CHECK2-SAME: () #[[ATTR3:[0-9]+]] { 865 // CHECK2-NEXT: entry: 866 // CHECK2-NEXT: call void @__tgt_register_requires(i64 1) 867 // CHECK2-NEXT: ret void 868 // 869 // 870 // CHECK3-LABEL: define {{[^@]+}}@_Z21teams_template_structv 871 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] { 872 // CHECK3-NEXT: entry: 873 // CHECK3-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4 874 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef [[V]]) 875 // CHECK3-NEXT: ret i32 [[CALL]] 876 // 877 // 878 // CHECK3-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv 879 // CHECK3-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR0]] comdat align 2 { 880 // CHECK3-NEXT: entry: 881 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 882 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4 883 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4 884 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4 885 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 886 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4 887 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4 888 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4 889 // CHECK3-NEXT: [[_TMP6:%.*]] = alloca i32, align 4 890 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 4 891 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 4 892 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 4 893 // CHECK3-NEXT: [[_TMP13:%.*]] = alloca i32, align 4 894 // CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 895 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 896 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0 897 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 898 // CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS** 899 // CHECK3-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 4 900 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 901 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]** 902 // CHECK3-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 4 903 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 904 // CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4 905 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 906 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 907 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) 908 // CHECK3-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 909 // CHECK3-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 910 // CHECK3-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 911 // CHECK3: omp_offload.failed: 912 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] 913 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 914 // CHECK3: omp_offload.cont: 915 // CHECK3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 916 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 917 // CHECK3-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS** 918 // CHECK3-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 4 919 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 920 // CHECK3-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]** 921 // CHECK3-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 4 922 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 923 // CHECK3-NEXT: store i8* null, i8** [[TMP13]], align 4 924 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 925 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 926 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 123) 927 // CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 928 // CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 929 // CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] 930 // CHECK3: omp_offload.failed7: 931 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33(%struct.SS* [[THIS1]]) #[[ATTR2]] 932 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT8]] 933 // CHECK3: omp_offload.cont8: 934 // CHECK3-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 935 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 936 // CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS** 937 // CHECK3-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 4 938 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 939 // CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]** 940 // CHECK3-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 4 941 // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i32 0, i32 0 942 // CHECK3-NEXT: store i8* null, i8** [[TMP22]], align 4 943 // CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 944 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 945 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 123) 946 // CHECK3-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 947 // CHECK3-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 948 // CHECK3-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] 949 // CHECK3: omp_offload.failed14: 950 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38(%struct.SS* [[THIS1]]) #[[ATTR2]] 951 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT15]] 952 // CHECK3: omp_offload.cont15: 953 // CHECK3-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 954 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A16]], i32 0, i32 0 955 // CHECK3-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 956 // CHECK3-NEXT: ret i32 [[TMP27]] 957 // 958 // 959 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 960 // CHECK3-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] { 961 // CHECK3-NEXT: entry: 962 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 963 // CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 964 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 965 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 966 // CHECK3-NEXT: ret void 967 // 968 // 969 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined. 970 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 971 // CHECK3-NEXT: entry: 972 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 973 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 974 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 975 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 976 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 977 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 978 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 979 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 980 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 981 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 982 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 983 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 984 // CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 985 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 986 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 987 // CHECK3-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 988 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 989 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 990 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 991 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 992 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 993 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 994 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 995 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 996 // CHECK3: cond.true: 997 // CHECK3-NEXT: br label [[COND_END:%.*]] 998 // CHECK3: cond.false: 999 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1000 // CHECK3-NEXT: br label [[COND_END]] 1001 // CHECK3: cond.end: 1002 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 1003 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1004 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1005 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 1006 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1007 // CHECK3: omp.inner.for.cond: 1008 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7 1009 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7 1010 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 1011 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1012 // CHECK3: omp.inner.for.body: 1013 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7 1014 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 1015 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1016 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !7 1017 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 1018 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !7 1019 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP9]] 1020 // CHECK3-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !7 1021 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1022 // CHECK3: omp.body.continue: 1023 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1024 // CHECK3: omp.inner.for.inc: 1025 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7 1026 // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 1027 // CHECK3-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7 1028 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] 1029 // CHECK3: omp.inner.for.end: 1030 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1031 // CHECK3: omp.loop.exit: 1032 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 1033 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1034 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 1035 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1036 // CHECK3: .omp.final.then: 1037 // CHECK3-NEXT: store i32 123, i32* [[I]], align 4 1038 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 1039 // CHECK3: .omp.final.done: 1040 // CHECK3-NEXT: ret void 1041 // 1042 // 1043 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33 1044 // CHECK3-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 1045 // CHECK3-NEXT: entry: 1046 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1047 // CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1048 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1049 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 1050 // CHECK3-NEXT: ret void 1051 // 1052 // 1053 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1 1054 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 1055 // CHECK3-NEXT: entry: 1056 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1057 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1058 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1059 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1060 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 1061 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1062 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1063 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1064 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1065 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 1066 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1067 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1068 // CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1069 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1070 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1071 // CHECK3-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 1072 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1073 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1074 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1075 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 1076 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1077 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1078 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 1079 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1080 // CHECK3: cond.true: 1081 // CHECK3-NEXT: br label [[COND_END:%.*]] 1082 // CHECK3: cond.false: 1083 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1084 // CHECK3-NEXT: br label [[COND_END]] 1085 // CHECK3: cond.end: 1086 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 1087 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1088 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1089 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 1090 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1091 // CHECK3: omp.inner.for.cond: 1092 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 1093 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13 1094 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 1095 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1096 // CHECK3: omp.inner.for.body: 1097 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 1098 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 1099 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1100 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13 1101 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 1102 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13 1103 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP9]] 1104 // CHECK3-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13 1105 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1106 // CHECK3: omp.body.continue: 1107 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1108 // CHECK3: omp.inner.for.inc: 1109 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 1110 // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 1111 // CHECK3-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 1112 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] 1113 // CHECK3: omp.inner.for.end: 1114 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1115 // CHECK3: omp.loop.exit: 1116 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 1117 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1118 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 1119 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1120 // CHECK3: .omp.final.then: 1121 // CHECK3-NEXT: store i32 123, i32* [[I]], align 4 1122 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 1123 // CHECK3: .omp.final.done: 1124 // CHECK3-NEXT: ret void 1125 // 1126 // 1127 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38 1128 // CHECK3-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 1129 // CHECK3-NEXT: entry: 1130 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1131 // CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1132 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1133 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 1134 // CHECK3-NEXT: ret void 1135 // 1136 // 1137 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4 1138 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 1139 // CHECK3-NEXT: entry: 1140 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1141 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1142 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1143 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1144 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 1145 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1146 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1147 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1148 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1149 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 1150 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1151 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1152 // CHECK3-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1153 // CHECK3-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1154 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1155 // CHECK3-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 1156 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1157 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1158 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1159 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 1160 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61) 1161 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 1162 // CHECK3: omp.dispatch.cond: 1163 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1164 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 1165 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1166 // CHECK3: cond.true: 1167 // CHECK3-NEXT: br label [[COND_END:%.*]] 1168 // CHECK3: cond.false: 1169 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1170 // CHECK3-NEXT: br label [[COND_END]] 1171 // CHECK3: cond.end: 1172 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 1173 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1174 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1175 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 1176 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1177 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1178 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 1179 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 1180 // CHECK3: omp.dispatch.body: 1181 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1182 // CHECK3: omp.inner.for.cond: 1183 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 1184 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !16 1185 // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 1186 // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1187 // CHECK3: omp.inner.for.body: 1188 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 1189 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 1190 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1191 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !16 1192 // CHECK3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 1193 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !16 1194 // CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]] 1195 // CHECK3-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !16 1196 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1197 // CHECK3: omp.body.continue: 1198 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1199 // CHECK3: omp.inner.for.inc: 1200 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 1201 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 1202 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 1203 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] 1204 // CHECK3: omp.inner.for.end: 1205 // CHECK3-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 1206 // CHECK3: omp.dispatch.inc: 1207 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1208 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 1209 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 1210 // CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4 1211 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1212 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 1213 // CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] 1214 // CHECK3-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4 1215 // CHECK3-NEXT: br label [[OMP_DISPATCH_COND]] 1216 // CHECK3: omp.dispatch.end: 1217 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 1218 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1219 // CHECK3-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 1220 // CHECK3-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1221 // CHECK3: .omp.final.then: 1222 // CHECK3-NEXT: store i32 123, i32* [[I]], align 4 1223 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 1224 // CHECK3: .omp.final.done: 1225 // CHECK3-NEXT: ret void 1226 // 1227 // 1228 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 1229 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] { 1230 // CHECK3-NEXT: entry: 1231 // CHECK3-NEXT: call void @__tgt_register_requires(i64 1) 1232 // CHECK3-NEXT: ret void 1233 // 1234 // 1235 // CHECK4-LABEL: define {{[^@]+}}@_Z21teams_template_structv 1236 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] { 1237 // CHECK4-NEXT: entry: 1238 // CHECK4-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4 1239 // CHECK4-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef [[V]]) 1240 // CHECK4-NEXT: ret i32 [[CALL]] 1241 // 1242 // 1243 // CHECK4-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv 1244 // CHECK4-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR0]] comdat align 2 { 1245 // CHECK4-NEXT: entry: 1246 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1247 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4 1248 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4 1249 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4 1250 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 1251 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4 1252 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4 1253 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4 1254 // CHECK4-NEXT: [[_TMP6:%.*]] = alloca i32, align 4 1255 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS10:%.*]] = alloca [1 x i8*], align 4 1256 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS11:%.*]] = alloca [1 x i8*], align 4 1257 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS12:%.*]] = alloca [1 x i8*], align 4 1258 // CHECK4-NEXT: [[_TMP13:%.*]] = alloca i32, align 4 1259 // CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1260 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1261 // CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0 1262 // CHECK4-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1263 // CHECK4-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to %struct.SS** 1264 // CHECK4-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP1]], align 4 1265 // CHECK4-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1266 // CHECK4-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [123 x i32]** 1267 // CHECK4-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP3]], align 4 1268 // CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1269 // CHECK4-NEXT: store i8* null, i8** [[TMP4]], align 4 1270 // CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1271 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1272 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) 1273 // CHECK4-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 1274 // CHECK4-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 1275 // CHECK4-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1276 // CHECK4: omp_offload.failed: 1277 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] 1278 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] 1279 // CHECK4: omp_offload.cont: 1280 // CHECK4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1281 // CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 1282 // CHECK4-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to %struct.SS** 1283 // CHECK4-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP10]], align 4 1284 // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 1285 // CHECK4-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [123 x i32]** 1286 // CHECK4-NEXT: store [123 x i32]* [[A2]], [123 x i32]** [[TMP12]], align 4 1287 // CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 1288 // CHECK4-NEXT: store i8* null, i8** [[TMP13]], align 4 1289 // CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 1290 // CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 1291 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 123) 1292 // CHECK4-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 1293 // CHECK4-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 1294 // CHECK4-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED7:%.*]], label [[OMP_OFFLOAD_CONT8:%.*]] 1295 // CHECK4: omp_offload.failed7: 1296 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33(%struct.SS* [[THIS1]]) #[[ATTR2]] 1297 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT8]] 1298 // CHECK4: omp_offload.cont8: 1299 // CHECK4-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1300 // CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 1301 // CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.SS** 1302 // CHECK4-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP19]], align 4 1303 // CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 1304 // CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [123 x i32]** 1305 // CHECK4-NEXT: store [123 x i32]* [[A9]], [123 x i32]** [[TMP21]], align 4 1306 // CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS12]], i32 0, i32 0 1307 // CHECK4-NEXT: store i8* null, i8** [[TMP22]], align 4 1308 // CHECK4-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS10]], i32 0, i32 0 1309 // CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS11]], i32 0, i32 0 1310 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 123) 1311 // CHECK4-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 1312 // CHECK4-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 1313 // CHECK4-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] 1314 // CHECK4: omp_offload.failed14: 1315 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38(%struct.SS* [[THIS1]]) #[[ATTR2]] 1316 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT15]] 1317 // CHECK4: omp_offload.cont15: 1318 // CHECK4-NEXT: [[A16:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1319 // CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A16]], i32 0, i32 0 1320 // CHECK4-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 1321 // CHECK4-NEXT: ret i32 [[TMP27]] 1322 // 1323 // 1324 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l28 1325 // CHECK4-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1:[0-9]+]] { 1326 // CHECK4-NEXT: entry: 1327 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1328 // CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1329 // CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1330 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined. to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 1331 // CHECK4-NEXT: ret void 1332 // 1333 // 1334 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined. 1335 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 1336 // CHECK4-NEXT: entry: 1337 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1338 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1339 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1340 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1341 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 1342 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1343 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1344 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1345 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1346 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 1347 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1348 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1349 // CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1350 // CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1351 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1352 // CHECK4-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 1353 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1354 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1355 // CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1356 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 1357 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1358 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1359 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 1360 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1361 // CHECK4: cond.true: 1362 // CHECK4-NEXT: br label [[COND_END:%.*]] 1363 // CHECK4: cond.false: 1364 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1365 // CHECK4-NEXT: br label [[COND_END]] 1366 // CHECK4: cond.end: 1367 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 1368 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1369 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1370 // CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 1371 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1372 // CHECK4: omp.inner.for.cond: 1373 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7 1374 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7 1375 // CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 1376 // CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1377 // CHECK4: omp.inner.for.body: 1378 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7 1379 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 1380 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1381 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !7 1382 // CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 1383 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !7 1384 // CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP9]] 1385 // CHECK4-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !7 1386 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1387 // CHECK4: omp.body.continue: 1388 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1389 // CHECK4: omp.inner.for.inc: 1390 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7 1391 // CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 1392 // CHECK4-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7 1393 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]] 1394 // CHECK4: omp.inner.for.end: 1395 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1396 // CHECK4: omp.loop.exit: 1397 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 1398 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1399 // CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 1400 // CHECK4-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1401 // CHECK4: .omp.final.then: 1402 // CHECK4-NEXT: store i32 123, i32* [[I]], align 4 1403 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 1404 // CHECK4: .omp.final.done: 1405 // CHECK4-NEXT: ret void 1406 // 1407 // 1408 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l33 1409 // CHECK4-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 1410 // CHECK4-NEXT: entry: 1411 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1412 // CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1413 // CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1414 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 1415 // CHECK4-NEXT: ret void 1416 // 1417 // 1418 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1 1419 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 1420 // CHECK4-NEXT: entry: 1421 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1422 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1423 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1424 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1425 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 1426 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1427 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1428 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1429 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1430 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 1431 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1432 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1433 // CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1434 // CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1435 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1436 // CHECK4-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 1437 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1438 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1439 // CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1440 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 1441 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1442 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1443 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 1444 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1445 // CHECK4: cond.true: 1446 // CHECK4-NEXT: br label [[COND_END:%.*]] 1447 // CHECK4: cond.false: 1448 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1449 // CHECK4-NEXT: br label [[COND_END]] 1450 // CHECK4: cond.end: 1451 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 1452 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1453 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1454 // CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 1455 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1456 // CHECK4: omp.inner.for.cond: 1457 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 1458 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13 1459 // CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 1460 // CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1461 // CHECK4: omp.inner.for.body: 1462 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 1463 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 1464 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1465 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13 1466 // CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 1467 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13 1468 // CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP9]] 1469 // CHECK4-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13 1470 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1471 // CHECK4: omp.body.continue: 1472 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1473 // CHECK4: omp.inner.for.inc: 1474 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 1475 // CHECK4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 1476 // CHECK4-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 1477 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] 1478 // CHECK4: omp.inner.for.end: 1479 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1480 // CHECK4: omp.loop.exit: 1481 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 1482 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1483 // CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 1484 // CHECK4-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1485 // CHECK4: .omp.final.then: 1486 // CHECK4-NEXT: store i32 123, i32* [[I]], align 4 1487 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 1488 // CHECK4: .omp.final.done: 1489 // CHECK4-NEXT: ret void 1490 // 1491 // 1492 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l38 1493 // CHECK4-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 1494 // CHECK4-NEXT: entry: 1495 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1496 // CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1497 // CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1498 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.SS*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), %struct.SS* [[TMP0]]) 1499 // CHECK4-NEXT: ret void 1500 // 1501 // 1502 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..4 1503 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.SS* noundef [[THIS:%.*]]) #[[ATTR1]] { 1504 // CHECK4-NEXT: entry: 1505 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1506 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1507 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1508 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1509 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 1510 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1511 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1512 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1513 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1514 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 1515 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1516 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1517 // CHECK4-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1518 // CHECK4-NEXT: [[TMP0:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1519 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1520 // CHECK4-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 1521 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1522 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1523 // CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1524 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 1525 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 61) 1526 // CHECK4-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 1527 // CHECK4: omp.dispatch.cond: 1528 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1529 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 122 1530 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1531 // CHECK4: cond.true: 1532 // CHECK4-NEXT: br label [[COND_END:%.*]] 1533 // CHECK4: cond.false: 1534 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1535 // CHECK4-NEXT: br label [[COND_END]] 1536 // CHECK4: cond.end: 1537 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 122, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 1538 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1539 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1540 // CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 1541 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1542 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1543 // CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 1544 // CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 1545 // CHECK4: omp.dispatch.body: 1546 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1547 // CHECK4: omp.inner.for.cond: 1548 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 1549 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !16 1550 // CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 1551 // CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1552 // CHECK4: omp.inner.for.body: 1553 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 1554 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 1555 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1556 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !16 1557 // CHECK4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[TMP0]], i32 0, i32 0 1558 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !16 1559 // CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP11]] 1560 // CHECK4-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !16 1561 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1562 // CHECK4: omp.body.continue: 1563 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1564 // CHECK4: omp.inner.for.inc: 1565 // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 1566 // CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 1567 // CHECK4-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 1568 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] 1569 // CHECK4: omp.inner.for.end: 1570 // CHECK4-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 1571 // CHECK4: omp.dispatch.inc: 1572 // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1573 // CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 1574 // CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 1575 // CHECK4-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4 1576 // CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1577 // CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 1578 // CHECK4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] 1579 // CHECK4-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4 1580 // CHECK4-NEXT: br label [[OMP_DISPATCH_COND]] 1581 // CHECK4: omp.dispatch.end: 1582 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 1583 // CHECK4-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1584 // CHECK4-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 1585 // CHECK4-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1586 // CHECK4: .omp.final.then: 1587 // CHECK4-NEXT: store i32 123, i32* [[I]], align 4 1588 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 1589 // CHECK4: .omp.final.done: 1590 // CHECK4-NEXT: ret void 1591 // 1592 // 1593 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 1594 // CHECK4-SAME: () #[[ATTR3:[0-9]+]] { 1595 // CHECK4-NEXT: entry: 1596 // CHECK4-NEXT: call void @__tgt_register_requires(i64 1) 1597 // CHECK4-NEXT: ret void 1598 // 1599 // 1600 // CHECK5-LABEL: define {{[^@]+}}@_Z21teams_template_structv 1601 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] { 1602 // CHECK5-NEXT: entry: 1603 // CHECK5-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4 1604 // CHECK5-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef [[V]]) 1605 // CHECK5-NEXT: ret i32 [[CALL]] 1606 // 1607 // 1608 // CHECK5-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv 1609 // CHECK5-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR0]] comdat align 2 { 1610 // CHECK5-NEXT: entry: 1611 // CHECK5-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 1612 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 1613 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1614 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1615 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1616 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 1617 // CHECK5-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 1618 // CHECK5-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 1619 // CHECK5-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 1620 // CHECK5-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 1621 // CHECK5-NEXT: [[I7:%.*]] = alloca i32, align 4 1622 // CHECK5-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 1623 // CHECK5-NEXT: [[DOTOMP_LB21:%.*]] = alloca i32, align 4 1624 // CHECK5-NEXT: [[DOTOMP_UB22:%.*]] = alloca i32, align 4 1625 // CHECK5-NEXT: [[DOTOMP_IV23:%.*]] = alloca i32, align 4 1626 // CHECK5-NEXT: [[I24:%.*]] = alloca i32, align 4 1627 // CHECK5-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 1628 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 1629 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1630 // CHECK5-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 1631 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1632 // CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 1633 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1634 // CHECK5: omp.inner.for.cond: 1635 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 1636 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 1637 // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 1638 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1639 // CHECK5: omp.inner.for.body: 1640 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 1641 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 1642 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1643 // CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2 1644 // CHECK5-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0 1645 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2 1646 // CHECK5-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64 1647 // CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]] 1648 // CHECK5-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !2 1649 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1650 // CHECK5: omp.body.continue: 1651 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1652 // CHECK5: omp.inner.for.inc: 1653 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 1654 // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 1655 // CHECK5-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 1656 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 1657 // CHECK5: omp.inner.for.end: 1658 // CHECK5-NEXT: store i32 123, i32* [[I]], align 4 1659 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 1660 // CHECK5-NEXT: store i32 122, i32* [[DOTOMP_UB5]], align 4 1661 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 1662 // CHECK5-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV6]], align 4 1663 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 1664 // CHECK5: omp.inner.for.cond8: 1665 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 1666 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6 1667 // CHECK5-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 1668 // CHECK5-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END19:%.*]] 1669 // CHECK5: omp.inner.for.body10: 1670 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 1671 // CHECK5-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1 1672 // CHECK5-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] 1673 // CHECK5-NEXT: store i32 [[ADD12]], i32* [[I7]], align 4, !llvm.access.group !6 1674 // CHECK5-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1675 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !6 1676 // CHECK5-NEXT: [[IDXPROM14:%.*]] = sext i32 [[TMP10]] to i64 1677 // CHECK5-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A13]], i64 0, i64 [[IDXPROM14]] 1678 // CHECK5-NEXT: store i32 0, i32* [[ARRAYIDX15]], align 4, !llvm.access.group !6 1679 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] 1680 // CHECK5: omp.body.continue16: 1681 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] 1682 // CHECK5: omp.inner.for.inc17: 1683 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 1684 // CHECK5-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1 1685 // CHECK5-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 1686 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]] 1687 // CHECK5: omp.inner.for.end19: 1688 // CHECK5-NEXT: store i32 123, i32* [[I7]], align 4 1689 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB21]], align 4 1690 // CHECK5-NEXT: store i32 122, i32* [[DOTOMP_UB22]], align 4 1691 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4 1692 // CHECK5-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV23]], align 4 1693 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND25:%.*]] 1694 // CHECK5: omp.inner.for.cond25: 1695 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9 1696 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !9 1697 // CHECK5-NEXT: [[CMP26:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 1698 // CHECK5-NEXT: br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END36:%.*]] 1699 // CHECK5: omp.inner.for.body27: 1700 // CHECK5-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9 1701 // CHECK5-NEXT: [[MUL28:%.*]] = mul nsw i32 [[TMP15]], 1 1702 // CHECK5-NEXT: [[ADD29:%.*]] = add nsw i32 0, [[MUL28]] 1703 // CHECK5-NEXT: store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !9 1704 // CHECK5-NEXT: [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1705 // CHECK5-NEXT: [[TMP16:%.*]] = load i32, i32* [[I24]], align 4, !llvm.access.group !9 1706 // CHECK5-NEXT: [[IDXPROM31:%.*]] = sext i32 [[TMP16]] to i64 1707 // CHECK5-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 [[IDXPROM31]] 1708 // CHECK5-NEXT: store i32 0, i32* [[ARRAYIDX32]], align 4, !llvm.access.group !9 1709 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE33:%.*]] 1710 // CHECK5: omp.body.continue33: 1711 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC34:%.*]] 1712 // CHECK5: omp.inner.for.inc34: 1713 // CHECK5-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9 1714 // CHECK5-NEXT: [[ADD35:%.*]] = add nsw i32 [[TMP17]], 1 1715 // CHECK5-NEXT: store i32 [[ADD35]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9 1716 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP10:![0-9]+]] 1717 // CHECK5: omp.inner.for.end36: 1718 // CHECK5-NEXT: store i32 123, i32* [[I24]], align 4 1719 // CHECK5-NEXT: [[A37:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1720 // CHECK5-NEXT: [[ARRAYIDX38:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A37]], i64 0, i64 0 1721 // CHECK5-NEXT: [[TMP18:%.*]] = load i32, i32* [[ARRAYIDX38]], align 4 1722 // CHECK5-NEXT: ret i32 [[TMP18]] 1723 // 1724 // 1725 // CHECK6-LABEL: define {{[^@]+}}@_Z21teams_template_structv 1726 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] { 1727 // CHECK6-NEXT: entry: 1728 // CHECK6-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4 1729 // CHECK6-NEXT: [[CALL:%.*]] = call noundef signext i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef [[V]]) 1730 // CHECK6-NEXT: ret i32 [[CALL]] 1731 // 1732 // 1733 // CHECK6-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv 1734 // CHECK6-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR0]] comdat align 2 { 1735 // CHECK6-NEXT: entry: 1736 // CHECK6-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 8 1737 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4 1738 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1739 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1740 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1741 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4 1742 // CHECK6-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 1743 // CHECK6-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 1744 // CHECK6-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 1745 // CHECK6-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 1746 // CHECK6-NEXT: [[I7:%.*]] = alloca i32, align 4 1747 // CHECK6-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 1748 // CHECK6-NEXT: [[DOTOMP_LB21:%.*]] = alloca i32, align 4 1749 // CHECK6-NEXT: [[DOTOMP_UB22:%.*]] = alloca i32, align 4 1750 // CHECK6-NEXT: [[DOTOMP_IV23:%.*]] = alloca i32, align 4 1751 // CHECK6-NEXT: [[I24:%.*]] = alloca i32, align 4 1752 // CHECK6-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 1753 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 1754 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1755 // CHECK6-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 1756 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1757 // CHECK6-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 1758 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1759 // CHECK6: omp.inner.for.cond: 1760 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 1761 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 1762 // CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 1763 // CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1764 // CHECK6: omp.inner.for.body: 1765 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 1766 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 1767 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1768 // CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2 1769 // CHECK6-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0 1770 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2 1771 // CHECK6-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64 1772 // CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i64 0, i64 [[IDXPROM]] 1773 // CHECK6-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !2 1774 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1775 // CHECK6: omp.body.continue: 1776 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1777 // CHECK6: omp.inner.for.inc: 1778 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 1779 // CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 1780 // CHECK6-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 1781 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 1782 // CHECK6: omp.inner.for.end: 1783 // CHECK6-NEXT: store i32 123, i32* [[I]], align 4 1784 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 1785 // CHECK6-NEXT: store i32 122, i32* [[DOTOMP_UB5]], align 4 1786 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 1787 // CHECK6-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV6]], align 4 1788 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 1789 // CHECK6: omp.inner.for.cond8: 1790 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 1791 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !6 1792 // CHECK6-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 1793 // CHECK6-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END19:%.*]] 1794 // CHECK6: omp.inner.for.body10: 1795 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 1796 // CHECK6-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1 1797 // CHECK6-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] 1798 // CHECK6-NEXT: store i32 [[ADD12]], i32* [[I7]], align 4, !llvm.access.group !6 1799 // CHECK6-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1800 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !6 1801 // CHECK6-NEXT: [[IDXPROM14:%.*]] = sext i32 [[TMP10]] to i64 1802 // CHECK6-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A13]], i64 0, i64 [[IDXPROM14]] 1803 // CHECK6-NEXT: store i32 0, i32* [[ARRAYIDX15]], align 4, !llvm.access.group !6 1804 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE16:%.*]] 1805 // CHECK6: omp.body.continue16: 1806 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC17:%.*]] 1807 // CHECK6: omp.inner.for.inc17: 1808 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 1809 // CHECK6-NEXT: [[ADD18:%.*]] = add nsw i32 [[TMP11]], 1 1810 // CHECK6-NEXT: store i32 [[ADD18]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !6 1811 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP7:![0-9]+]] 1812 // CHECK6: omp.inner.for.end19: 1813 // CHECK6-NEXT: store i32 123, i32* [[I7]], align 4 1814 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB21]], align 4 1815 // CHECK6-NEXT: store i32 122, i32* [[DOTOMP_UB22]], align 4 1816 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB21]], align 4 1817 // CHECK6-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV23]], align 4 1818 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND25:%.*]] 1819 // CHECK6: omp.inner.for.cond25: 1820 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9 1821 // CHECK6-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB22]], align 4, !llvm.access.group !9 1822 // CHECK6-NEXT: [[CMP26:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 1823 // CHECK6-NEXT: br i1 [[CMP26]], label [[OMP_INNER_FOR_BODY27:%.*]], label [[OMP_INNER_FOR_END36:%.*]] 1824 // CHECK6: omp.inner.for.body27: 1825 // CHECK6-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9 1826 // CHECK6-NEXT: [[MUL28:%.*]] = mul nsw i32 [[TMP15]], 1 1827 // CHECK6-NEXT: [[ADD29:%.*]] = add nsw i32 0, [[MUL28]] 1828 // CHECK6-NEXT: store i32 [[ADD29]], i32* [[I24]], align 4, !llvm.access.group !9 1829 // CHECK6-NEXT: [[A30:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1830 // CHECK6-NEXT: [[TMP16:%.*]] = load i32, i32* [[I24]], align 4, !llvm.access.group !9 1831 // CHECK6-NEXT: [[IDXPROM31:%.*]] = sext i32 [[TMP16]] to i64 1832 // CHECK6-NEXT: [[ARRAYIDX32:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A30]], i64 0, i64 [[IDXPROM31]] 1833 // CHECK6-NEXT: store i32 0, i32* [[ARRAYIDX32]], align 4, !llvm.access.group !9 1834 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE33:%.*]] 1835 // CHECK6: omp.body.continue33: 1836 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC34:%.*]] 1837 // CHECK6: omp.inner.for.inc34: 1838 // CHECK6-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9 1839 // CHECK6-NEXT: [[ADD35:%.*]] = add nsw i32 [[TMP17]], 1 1840 // CHECK6-NEXT: store i32 [[ADD35]], i32* [[DOTOMP_IV23]], align 4, !llvm.access.group !9 1841 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND25]], !llvm.loop [[LOOP10:![0-9]+]] 1842 // CHECK6: omp.inner.for.end36: 1843 // CHECK6-NEXT: store i32 123, i32* [[I24]], align 4 1844 // CHECK6-NEXT: [[A37:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1845 // CHECK6-NEXT: [[ARRAYIDX38:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A37]], i64 0, i64 0 1846 // CHECK6-NEXT: [[TMP18:%.*]] = load i32, i32* [[ARRAYIDX38]], align 4 1847 // CHECK6-NEXT: ret i32 [[TMP18]] 1848 // 1849 // 1850 // CHECK7-LABEL: define {{[^@]+}}@_Z21teams_template_structv 1851 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] { 1852 // CHECK7-NEXT: entry: 1853 // CHECK7-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4 1854 // CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef [[V]]) 1855 // CHECK7-NEXT: ret i32 [[CALL]] 1856 // 1857 // 1858 // CHECK7-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv 1859 // CHECK7-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR0]] comdat align 2 { 1860 // CHECK7-NEXT: entry: 1861 // CHECK7-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1862 // CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4 1863 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1864 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1865 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1866 // CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4 1867 // CHECK7-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 1868 // CHECK7-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 1869 // CHECK7-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 1870 // CHECK7-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 1871 // CHECK7-NEXT: [[I7:%.*]] = alloca i32, align 4 1872 // CHECK7-NEXT: [[_TMP19:%.*]] = alloca i32, align 4 1873 // CHECK7-NEXT: [[DOTOMP_LB20:%.*]] = alloca i32, align 4 1874 // CHECK7-NEXT: [[DOTOMP_UB21:%.*]] = alloca i32, align 4 1875 // CHECK7-NEXT: [[DOTOMP_IV22:%.*]] = alloca i32, align 4 1876 // CHECK7-NEXT: [[I23:%.*]] = alloca i32, align 4 1877 // CHECK7-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 1878 // CHECK7-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 1879 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1880 // CHECK7-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 1881 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1882 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 1883 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1884 // CHECK7: omp.inner.for.cond: 1885 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 1886 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3 1887 // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 1888 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1889 // CHECK7: omp.inner.for.body: 1890 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 1891 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 1892 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1893 // CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3 1894 // CHECK7-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0 1895 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3 1896 // CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP4]] 1897 // CHECK7-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !3 1898 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1899 // CHECK7: omp.body.continue: 1900 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1901 // CHECK7: omp.inner.for.inc: 1902 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 1903 // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 1904 // CHECK7-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 1905 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 1906 // CHECK7: omp.inner.for.end: 1907 // CHECK7-NEXT: store i32 123, i32* [[I]], align 4 1908 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 1909 // CHECK7-NEXT: store i32 122, i32* [[DOTOMP_UB5]], align 4 1910 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 1911 // CHECK7-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV6]], align 4 1912 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 1913 // CHECK7: omp.inner.for.cond8: 1914 // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 1915 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7 1916 // CHECK7-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 1917 // CHECK7-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END18:%.*]] 1918 // CHECK7: omp.inner.for.body10: 1919 // CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 1920 // CHECK7-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1 1921 // CHECK7-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] 1922 // CHECK7-NEXT: store i32 [[ADD12]], i32* [[I7]], align 4, !llvm.access.group !7 1923 // CHECK7-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1924 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !7 1925 // CHECK7-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A13]], i32 0, i32 [[TMP10]] 1926 // CHECK7-NEXT: store i32 0, i32* [[ARRAYIDX14]], align 4, !llvm.access.group !7 1927 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]] 1928 // CHECK7: omp.body.continue15: 1929 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] 1930 // CHECK7: omp.inner.for.inc16: 1931 // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 1932 // CHECK7-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP11]], 1 1933 // CHECK7-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 1934 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP8:![0-9]+]] 1935 // CHECK7: omp.inner.for.end18: 1936 // CHECK7-NEXT: store i32 123, i32* [[I7]], align 4 1937 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB20]], align 4 1938 // CHECK7-NEXT: store i32 122, i32* [[DOTOMP_UB21]], align 4 1939 // CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB20]], align 4 1940 // CHECK7-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV22]], align 4 1941 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND24:%.*]] 1942 // CHECK7: omp.inner.for.cond24: 1943 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10 1944 // CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB21]], align 4, !llvm.access.group !10 1945 // CHECK7-NEXT: [[CMP25:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 1946 // CHECK7-NEXT: br i1 [[CMP25]], label [[OMP_INNER_FOR_BODY26:%.*]], label [[OMP_INNER_FOR_END34:%.*]] 1947 // CHECK7: omp.inner.for.body26: 1948 // CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10 1949 // CHECK7-NEXT: [[MUL27:%.*]] = mul nsw i32 [[TMP15]], 1 1950 // CHECK7-NEXT: [[ADD28:%.*]] = add nsw i32 0, [[MUL27]] 1951 // CHECK7-NEXT: store i32 [[ADD28]], i32* [[I23]], align 4, !llvm.access.group !10 1952 // CHECK7-NEXT: [[A29:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1953 // CHECK7-NEXT: [[TMP16:%.*]] = load i32, i32* [[I23]], align 4, !llvm.access.group !10 1954 // CHECK7-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A29]], i32 0, i32 [[TMP16]] 1955 // CHECK7-NEXT: store i32 0, i32* [[ARRAYIDX30]], align 4, !llvm.access.group !10 1956 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE31:%.*]] 1957 // CHECK7: omp.body.continue31: 1958 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC32:%.*]] 1959 // CHECK7: omp.inner.for.inc32: 1960 // CHECK7-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10 1961 // CHECK7-NEXT: [[ADD33:%.*]] = add nsw i32 [[TMP17]], 1 1962 // CHECK7-NEXT: store i32 [[ADD33]], i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10 1963 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND24]], !llvm.loop [[LOOP11:![0-9]+]] 1964 // CHECK7: omp.inner.for.end34: 1965 // CHECK7-NEXT: store i32 123, i32* [[I23]], align 4 1966 // CHECK7-NEXT: [[A35:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 1967 // CHECK7-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A35]], i32 0, i32 0 1968 // CHECK7-NEXT: [[TMP18:%.*]] = load i32, i32* [[ARRAYIDX36]], align 4 1969 // CHECK7-NEXT: ret i32 [[TMP18]] 1970 // 1971 // 1972 // CHECK8-LABEL: define {{[^@]+}}@_Z21teams_template_structv 1973 // CHECK8-SAME: () #[[ATTR0:[0-9]+]] { 1974 // CHECK8-NEXT: entry: 1975 // CHECK8-NEXT: [[V:%.*]] = alloca [[STRUCT_SS:%.*]], align 4 1976 // CHECK8-NEXT: [[CALL:%.*]] = call noundef i32 @_ZN2SSIiLi123ELx456EE3fooEv(%struct.SS* noundef [[V]]) 1977 // CHECK8-NEXT: ret i32 [[CALL]] 1978 // 1979 // 1980 // CHECK8-LABEL: define {{[^@]+}}@_ZN2SSIiLi123ELx456EE3fooEv 1981 // CHECK8-SAME: (%struct.SS* noundef [[THIS:%.*]]) #[[ATTR0]] comdat align 2 { 1982 // CHECK8-NEXT: entry: 1983 // CHECK8-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.SS*, align 4 1984 // CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4 1985 // CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1986 // CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1987 // CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1988 // CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4 1989 // CHECK8-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 1990 // CHECK8-NEXT: [[DOTOMP_LB4:%.*]] = alloca i32, align 4 1991 // CHECK8-NEXT: [[DOTOMP_UB5:%.*]] = alloca i32, align 4 1992 // CHECK8-NEXT: [[DOTOMP_IV6:%.*]] = alloca i32, align 4 1993 // CHECK8-NEXT: [[I7:%.*]] = alloca i32, align 4 1994 // CHECK8-NEXT: [[_TMP19:%.*]] = alloca i32, align 4 1995 // CHECK8-NEXT: [[DOTOMP_LB20:%.*]] = alloca i32, align 4 1996 // CHECK8-NEXT: [[DOTOMP_UB21:%.*]] = alloca i32, align 4 1997 // CHECK8-NEXT: [[DOTOMP_IV22:%.*]] = alloca i32, align 4 1998 // CHECK8-NEXT: [[I23:%.*]] = alloca i32, align 4 1999 // CHECK8-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 2000 // CHECK8-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 2001 // CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2002 // CHECK8-NEXT: store i32 122, i32* [[DOTOMP_UB]], align 4 2003 // CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2004 // CHECK8-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 2005 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2006 // CHECK8: omp.inner.for.cond: 2007 // CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 2008 // CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3 2009 // CHECK8-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 2010 // CHECK8-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2011 // CHECK8: omp.inner.for.body: 2012 // CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 2013 // CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 2014 // CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2015 // CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3 2016 // CHECK8-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SS:%.*]], %struct.SS* [[THIS1]], i32 0, i32 0 2017 // CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3 2018 // CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A]], i32 0, i32 [[TMP4]] 2019 // CHECK8-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !3 2020 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2021 // CHECK8: omp.body.continue: 2022 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2023 // CHECK8: omp.inner.for.inc: 2024 // CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 2025 // CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP5]], 1 2026 // CHECK8-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 2027 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 2028 // CHECK8: omp.inner.for.end: 2029 // CHECK8-NEXT: store i32 123, i32* [[I]], align 4 2030 // CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB4]], align 4 2031 // CHECK8-NEXT: store i32 122, i32* [[DOTOMP_UB5]], align 4 2032 // CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB4]], align 4 2033 // CHECK8-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV6]], align 4 2034 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 2035 // CHECK8: omp.inner.for.cond8: 2036 // CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 2037 // CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB5]], align 4, !llvm.access.group !7 2038 // CHECK8-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 2039 // CHECK8-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END18:%.*]] 2040 // CHECK8: omp.inner.for.body10: 2041 // CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 2042 // CHECK8-NEXT: [[MUL11:%.*]] = mul nsw i32 [[TMP9]], 1 2043 // CHECK8-NEXT: [[ADD12:%.*]] = add nsw i32 0, [[MUL11]] 2044 // CHECK8-NEXT: store i32 [[ADD12]], i32* [[I7]], align 4, !llvm.access.group !7 2045 // CHECK8-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 2046 // CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[I7]], align 4, !llvm.access.group !7 2047 // CHECK8-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A13]], i32 0, i32 [[TMP10]] 2048 // CHECK8-NEXT: store i32 0, i32* [[ARRAYIDX14]], align 4, !llvm.access.group !7 2049 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]] 2050 // CHECK8: omp.body.continue15: 2051 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] 2052 // CHECK8: omp.inner.for.inc16: 2053 // CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 2054 // CHECK8-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP11]], 1 2055 // CHECK8-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV6]], align 4, !llvm.access.group !7 2056 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP8:![0-9]+]] 2057 // CHECK8: omp.inner.for.end18: 2058 // CHECK8-NEXT: store i32 123, i32* [[I7]], align 4 2059 // CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB20]], align 4 2060 // CHECK8-NEXT: store i32 122, i32* [[DOTOMP_UB21]], align 4 2061 // CHECK8-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB20]], align 4 2062 // CHECK8-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV22]], align 4 2063 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND24:%.*]] 2064 // CHECK8: omp.inner.for.cond24: 2065 // CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10 2066 // CHECK8-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB21]], align 4, !llvm.access.group !10 2067 // CHECK8-NEXT: [[CMP25:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 2068 // CHECK8-NEXT: br i1 [[CMP25]], label [[OMP_INNER_FOR_BODY26:%.*]], label [[OMP_INNER_FOR_END34:%.*]] 2069 // CHECK8: omp.inner.for.body26: 2070 // CHECK8-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10 2071 // CHECK8-NEXT: [[MUL27:%.*]] = mul nsw i32 [[TMP15]], 1 2072 // CHECK8-NEXT: [[ADD28:%.*]] = add nsw i32 0, [[MUL27]] 2073 // CHECK8-NEXT: store i32 [[ADD28]], i32* [[I23]], align 4, !llvm.access.group !10 2074 // CHECK8-NEXT: [[A29:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 2075 // CHECK8-NEXT: [[TMP16:%.*]] = load i32, i32* [[I23]], align 4, !llvm.access.group !10 2076 // CHECK8-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A29]], i32 0, i32 [[TMP16]] 2077 // CHECK8-NEXT: store i32 0, i32* [[ARRAYIDX30]], align 4, !llvm.access.group !10 2078 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE31:%.*]] 2079 // CHECK8: omp.body.continue31: 2080 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC32:%.*]] 2081 // CHECK8: omp.inner.for.inc32: 2082 // CHECK8-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10 2083 // CHECK8-NEXT: [[ADD33:%.*]] = add nsw i32 [[TMP17]], 1 2084 // CHECK8-NEXT: store i32 [[ADD33]], i32* [[DOTOMP_IV22]], align 4, !llvm.access.group !10 2085 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND24]], !llvm.loop [[LOOP11:![0-9]+]] 2086 // CHECK8: omp.inner.for.end34: 2087 // CHECK8-NEXT: store i32 123, i32* [[I23]], align 4 2088 // CHECK8-NEXT: [[A35:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 2089 // CHECK8-NEXT: [[ARRAYIDX36:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A35]], i32 0, i32 0 2090 // CHECK8-NEXT: [[TMP18:%.*]] = load i32, i32* [[ARRAYIDX36]], align 4 2091 // CHECK8-NEXT: ret i32 [[TMP18]] 2092 // 2093 // 2094 // CHECK9-LABEL: define {{[^@]+}}@main 2095 // CHECK9-SAME: (i32 noundef signext [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] { 2096 // CHECK9-NEXT: entry: 2097 // CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 2098 // CHECK9-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 2099 // CHECK9-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 2100 // CHECK9-NEXT: [[N:%.*]] = alloca i32, align 4 2101 // CHECK9-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 2102 // CHECK9-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 2103 // CHECK9-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8 2104 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 2105 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 2106 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 2107 // CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 2108 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 2109 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2110 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 2111 // CHECK9-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8 2112 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 2113 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 2114 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 2115 // CHECK9-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 2116 // CHECK9-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 2117 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 2118 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 2119 // CHECK9-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 2120 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [3 x i8*], align 8 2121 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [3 x i8*], align 8 2122 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [3 x i8*], align 8 2123 // CHECK9-NEXT: [[DOTOFFLOAD_SIZES23:%.*]] = alloca [3 x i64], align 8 2124 // CHECK9-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 2125 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 2126 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 2127 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4 2128 // CHECK9-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 2129 // CHECK9-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 2130 // CHECK9-NEXT: store i32 100, i32* [[N]], align 4 2131 // CHECK9-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4 2132 // CHECK9-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 2133 // CHECK9-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 2134 // CHECK9-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 2135 // CHECK9-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 2136 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 2137 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[N]], align 4 2138 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32* 2139 // CHECK9-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4 2140 // CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8 2141 // CHECK9-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4 2142 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 2143 // CHECK9-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 2144 // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP7]], align 8 2145 // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 2146 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 2147 // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 2148 // CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 2149 // CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 2150 // CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 2151 // CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 2152 // CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 2153 // CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* 2154 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 2155 // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 2156 // CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* 2157 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 2158 // CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 2159 // CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 2160 // CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 2161 // CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 2162 // CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 2163 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** 2164 // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 2165 // CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 2166 // CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** 2167 // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 2168 // CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 2169 // CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 2170 // CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 2171 // CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 2172 // CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 2173 // CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 2174 // CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 2175 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 2176 // CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 2177 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2178 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 2179 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 2180 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 2181 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 2182 // CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2183 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 2184 // CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 2185 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) 2186 // CHECK9-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 2187 // CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 2188 // CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 2189 // CHECK9: omp_offload.failed: 2190 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] 2191 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] 2192 // CHECK9: omp_offload.cont: 2193 // CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 2194 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* 2195 // CHECK9-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 2196 // CHECK9-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 2197 // CHECK9-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 2198 // CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 2199 // CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* 2200 // CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 2201 // CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 2202 // CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* 2203 // CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 2204 // CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 2205 // CHECK9-NEXT: store i64 4, i64* [[TMP40]], align 8 2206 // CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 2207 // CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 2208 // CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 2209 // CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* 2210 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 2211 // CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 2212 // CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* 2213 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 2214 // CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 2215 // CHECK9-NEXT: store i64 8, i64* [[TMP46]], align 8 2216 // CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 2217 // CHECK9-NEXT: store i8* null, i8** [[TMP47]], align 8 2218 // CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 2219 // CHECK9-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** 2220 // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 2221 // CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 2222 // CHECK9-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** 2223 // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 2224 // CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 2225 // CHECK9-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 2226 // CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 2227 // CHECK9-NEXT: store i8* null, i8** [[TMP53]], align 8 2228 // CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 2229 // CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 2230 // CHECK9-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 2231 // CHECK9-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 2232 // CHECK9-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 2233 // CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 2234 // CHECK9-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 2235 // CHECK9-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 2236 // CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 2237 // CHECK9-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 2238 // CHECK9-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 2239 // CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 2240 // CHECK9-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 2241 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP60]]) 2242 // CHECK9-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 2243 // CHECK9-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 2244 // CHECK9-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] 2245 // CHECK9: omp_offload.failed16: 2246 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] 2247 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT17]] 2248 // CHECK9: omp_offload.cont17: 2249 // CHECK9-NEXT: [[TMP63:%.*]] = load i32, i32* [[N]], align 4 2250 // CHECK9-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* 2251 // CHECK9-NEXT: store i32 [[TMP63]], i32* [[CONV19]], align 4 2252 // CHECK9-NEXT: [[TMP64:%.*]] = load i64, i64* [[N_CASTED18]], align 8 2253 // CHECK9-NEXT: [[TMP65:%.*]] = mul nuw i64 [[TMP1]], 4 2254 // CHECK9-NEXT: [[TMP66:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 2255 // CHECK9-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* 2256 // CHECK9-NEXT: store i64 [[TMP64]], i64* [[TMP67]], align 8 2257 // CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 2258 // CHECK9-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* 2259 // CHECK9-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 2260 // CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 2261 // CHECK9-NEXT: store i64 4, i64* [[TMP70]], align 8 2262 // CHECK9-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 2263 // CHECK9-NEXT: store i8* null, i8** [[TMP71]], align 8 2264 // CHECK9-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 2265 // CHECK9-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* 2266 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 2267 // CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 2268 // CHECK9-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* 2269 // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP75]], align 8 2270 // CHECK9-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 1 2271 // CHECK9-NEXT: store i64 8, i64* [[TMP76]], align 8 2272 // CHECK9-NEXT: [[TMP77:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 2273 // CHECK9-NEXT: store i8* null, i8** [[TMP77]], align 8 2274 // CHECK9-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 2275 // CHECK9-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32** 2276 // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP79]], align 8 2277 // CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 2278 // CHECK9-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** 2279 // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 8 2280 // CHECK9-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 2 2281 // CHECK9-NEXT: store i64 [[TMP65]], i64* [[TMP82]], align 8 2282 // CHECK9-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 2283 // CHECK9-NEXT: store i8* null, i8** [[TMP83]], align 8 2284 // CHECK9-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 2285 // CHECK9-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 2286 // CHECK9-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 2287 // CHECK9-NEXT: [[TMP87:%.*]] = load i32, i32* [[N]], align 4 2288 // CHECK9-NEXT: store i32 [[TMP87]], i32* [[DOTCAPTURE_EXPR_25]], align 4 2289 // CHECK9-NEXT: [[TMP88:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 2290 // CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP88]], 0 2291 // CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 2292 // CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 2293 // CHECK9-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 2294 // CHECK9-NEXT: [[TMP89:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 2295 // CHECK9-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP89]], 1 2296 // CHECK9-NEXT: [[TMP90:%.*]] = zext i32 [[ADD30]] to i64 2297 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP90]]) 2298 // CHECK9-NEXT: [[TMP91:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP84]], i8** [[TMP85]], i64* [[TMP86]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 2299 // CHECK9-NEXT: [[TMP92:%.*]] = icmp ne i32 [[TMP91]], 0 2300 // CHECK9-NEXT: br i1 [[TMP92]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] 2301 // CHECK9: omp_offload.failed31: 2302 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i64 [[TMP64]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] 2303 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT32]] 2304 // CHECK9: omp_offload.cont32: 2305 // CHECK9-NEXT: [[TMP93:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 2306 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP93]]) 2307 // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 2308 // CHECK9-NEXT: [[TMP94:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 2309 // CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP94]]) 2310 // CHECK9-NEXT: [[TMP95:%.*]] = load i32, i32* [[RETVAL]], align 4 2311 // CHECK9-NEXT: ret i32 [[TMP95]] 2312 // 2313 // 2314 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 2315 // CHECK9-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] { 2316 // CHECK9-NEXT: entry: 2317 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 2318 // CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 2319 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 2320 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8 2321 // CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 2322 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 2323 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* 2324 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 2325 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 2326 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) 2327 // CHECK9-NEXT: ret void 2328 // 2329 // 2330 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined. 2331 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 2332 // CHECK9-NEXT: entry: 2333 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2334 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2335 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 2336 // CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 2337 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 2338 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2339 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 2340 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2341 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 2342 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 2343 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2344 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2345 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2346 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2347 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4 2348 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2349 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2350 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 2351 // CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 2352 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 2353 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 2354 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 2355 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8 2356 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 2357 // CHECK9-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 2358 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2359 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 2360 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 2361 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 2362 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 2363 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4 2364 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2365 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 2366 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 2367 // CHECK9: omp.precond.then: 2368 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2369 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2370 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 2371 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2372 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2373 // CHECK9-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2374 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 2375 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2376 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2377 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2378 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] 2379 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2380 // CHECK9: cond.true: 2381 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2382 // CHECK9-NEXT: br label [[COND_END:%.*]] 2383 // CHECK9: cond.false: 2384 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2385 // CHECK9-NEXT: br label [[COND_END]] 2386 // CHECK9: cond.end: 2387 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 2388 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2389 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2390 // CHECK9-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 2391 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2392 // CHECK9: omp.inner.for.cond: 2393 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 2394 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !9 2395 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 2396 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2397 // CHECK9: omp.inner.for.body: 2398 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 2399 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 2400 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2401 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !9 2402 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !9 2403 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64 2404 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[IDXPROM]] 2405 // CHECK9-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !9 2406 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2407 // CHECK9: omp.body.continue: 2408 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2409 // CHECK9: omp.inner.for.inc: 2410 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 2411 // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 2412 // CHECK9-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 2413 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] 2414 // CHECK9: omp.inner.for.end: 2415 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2416 // CHECK9: omp.loop.exit: 2417 // CHECK9-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2418 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4 2419 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]]) 2420 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2421 // CHECK9-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 2422 // CHECK9-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2423 // CHECK9: .omp.final.then: 2424 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2425 // CHECK9-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 2426 // CHECK9-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 2427 // CHECK9-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 2428 // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]] 2429 // CHECK9-NEXT: store i32 [[ADD10]], i32* [[I3]], align 4 2430 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 2431 // CHECK9: .omp.final.done: 2432 // CHECK9-NEXT: br label [[OMP_PRECOND_END]] 2433 // CHECK9: omp.precond.end: 2434 // CHECK9-NEXT: ret void 2435 // 2436 // 2437 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105 2438 // CHECK9-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 2439 // CHECK9-NEXT: entry: 2440 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 2441 // CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 2442 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 2443 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8 2444 // CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 2445 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 2446 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* 2447 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 2448 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 2449 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) 2450 // CHECK9-NEXT: ret void 2451 // 2452 // 2453 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1 2454 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 2455 // CHECK9-NEXT: entry: 2456 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2457 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2458 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 2459 // CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 2460 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 2461 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2462 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 2463 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2464 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 2465 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 2466 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2467 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2468 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2469 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2470 // CHECK9-NEXT: [[I3:%.*]] = alloca i32, align 4 2471 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2472 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2473 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 2474 // CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 2475 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 2476 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 2477 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 2478 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8 2479 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 2480 // CHECK9-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 2481 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2482 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 2483 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 2484 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 2485 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 2486 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4 2487 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2488 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 2489 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 2490 // CHECK9: omp.precond.then: 2491 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2492 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2493 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 2494 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2495 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2496 // CHECK9-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2497 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 2498 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2499 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2500 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2501 // CHECK9-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] 2502 // CHECK9-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2503 // CHECK9: cond.true: 2504 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2505 // CHECK9-NEXT: br label [[COND_END:%.*]] 2506 // CHECK9: cond.false: 2507 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2508 // CHECK9-NEXT: br label [[COND_END]] 2509 // CHECK9: cond.end: 2510 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 2511 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2512 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2513 // CHECK9-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 2514 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2515 // CHECK9: omp.inner.for.cond: 2516 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 2517 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15 2518 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 2519 // CHECK9-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2520 // CHECK9: omp.inner.for.body: 2521 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 2522 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 2523 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2524 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !15 2525 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15 2526 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64 2527 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[IDXPROM]] 2528 // CHECK9-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !15 2529 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2530 // CHECK9: omp.body.continue: 2531 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2532 // CHECK9: omp.inner.for.inc: 2533 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 2534 // CHECK9-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 2535 // CHECK9-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 2536 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 2537 // CHECK9: omp.inner.for.end: 2538 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2539 // CHECK9: omp.loop.exit: 2540 // CHECK9-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2541 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4 2542 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]]) 2543 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2544 // CHECK9-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 2545 // CHECK9-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2546 // CHECK9: .omp.final.then: 2547 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2548 // CHECK9-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 2549 // CHECK9-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 2550 // CHECK9-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 2551 // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]] 2552 // CHECK9-NEXT: store i32 [[ADD10]], i32* [[I3]], align 4 2553 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 2554 // CHECK9: .omp.final.done: 2555 // CHECK9-NEXT: br label [[OMP_PRECOND_END]] 2556 // CHECK9: omp.precond.end: 2557 // CHECK9-NEXT: ret void 2558 // 2559 // 2560 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110 2561 // CHECK9-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 2562 // CHECK9-NEXT: entry: 2563 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 2564 // CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 2565 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 2566 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 2567 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 2568 // CHECK9-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8 2569 // CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 2570 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 2571 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* 2572 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 2573 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 2574 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 4 2575 // CHECK9-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4 2576 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 2577 // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 2578 // CHECK9-NEXT: store i32 [[TMP3]], i32* [[CONV1]], align 4 2579 // CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 2580 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) 2581 // CHECK9-NEXT: ret void 2582 // 2583 // 2584 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3 2585 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { 2586 // CHECK9-NEXT: entry: 2587 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2588 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2589 // CHECK9-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 2590 // CHECK9-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 2591 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 2592 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 2593 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2594 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 2595 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 2596 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 2597 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 2598 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2599 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2600 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2601 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2602 // CHECK9-NEXT: [[I4:%.*]] = alloca i32, align 4 2603 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2604 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2605 // CHECK9-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 2606 // CHECK9-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 2607 // CHECK9-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 2608 // CHECK9-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 2609 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 2610 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 2611 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8 2612 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 2613 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 2614 // CHECK9-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_1]], align 4 2615 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2616 // CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 2617 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 2618 // CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 2619 // CHECK9-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 2620 // CHECK9-NEXT: store i32 0, i32* [[I]], align 4 2621 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2622 // CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 2623 // CHECK9-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 2624 // CHECK9: omp.precond.then: 2625 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2626 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 2627 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 2628 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2629 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2630 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[CONV]], align 4 2631 // CHECK9-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2632 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4 2633 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]]) 2634 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 2635 // CHECK9: omp.dispatch.cond: 2636 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2637 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 2638 // CHECK9-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]] 2639 // CHECK9-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2640 // CHECK9: cond.true: 2641 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 2642 // CHECK9-NEXT: br label [[COND_END:%.*]] 2643 // CHECK9: cond.false: 2644 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2645 // CHECK9-NEXT: br label [[COND_END]] 2646 // CHECK9: cond.end: 2647 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ] 2648 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2649 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2650 // CHECK9-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4 2651 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 2652 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2653 // CHECK9-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] 2654 // CHECK9-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 2655 // CHECK9: omp.dispatch.body: 2656 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2657 // CHECK9: omp.inner.for.cond: 2658 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 2659 // CHECK9-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18 2660 // CHECK9-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 2661 // CHECK9-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2662 // CHECK9: omp.inner.for.body: 2663 // CHECK9-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 2664 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1 2665 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2666 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !18 2667 // CHECK9-NEXT: [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !18 2668 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64 2669 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[IDXPROM]] 2670 // CHECK9-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18 2671 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2672 // CHECK9: omp.body.continue: 2673 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2674 // CHECK9: omp.inner.for.inc: 2675 // CHECK9-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 2676 // CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP21]], 1 2677 // CHECK9-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 2678 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] 2679 // CHECK9: omp.inner.for.end: 2680 // CHECK9-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 2681 // CHECK9: omp.dispatch.inc: 2682 // CHECK9-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2683 // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 2684 // CHECK9-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP23]] 2685 // CHECK9-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_LB]], align 4 2686 // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2687 // CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 2688 // CHECK9-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]] 2689 // CHECK9-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_UB]], align 4 2690 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND]] 2691 // CHECK9: omp.dispatch.end: 2692 // CHECK9-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2693 // CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4 2694 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]]) 2695 // CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2696 // CHECK9-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 2697 // CHECK9-NEXT: br i1 [[TMP29]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2698 // CHECK9: .omp.final.then: 2699 // CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 2700 // CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP30]], 0 2701 // CHECK9-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 2702 // CHECK9-NEXT: [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1 2703 // CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 2704 // CHECK9-NEXT: store i32 [[ADD14]], i32* [[I4]], align 4 2705 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 2706 // CHECK9: .omp.final.done: 2707 // CHECK9-NEXT: br label [[OMP_PRECOND_END]] 2708 // CHECK9: omp.precond.end: 2709 // CHECK9-NEXT: ret void 2710 // 2711 // 2712 // CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_ 2713 // CHECK9-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat { 2714 // CHECK9-NEXT: entry: 2715 // CHECK9-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 2716 // CHECK9-NEXT: [[A:%.*]] = alloca [10 x i32], align 4 2717 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 2718 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 2719 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 2720 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 2721 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8 2722 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8 2723 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8 2724 // CHECK9-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 2725 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8 2726 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8 2727 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8 2728 // CHECK9-NEXT: [[_TMP10:%.*]] = alloca i32, align 4 2729 // CHECK9-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 2730 // CHECK9-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 2731 // CHECK9-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]** 2732 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 8 2733 // CHECK9-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 2734 // CHECK9-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]** 2735 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 8 2736 // CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 2737 // CHECK9-NEXT: store i8* null, i8** [[TMP4]], align 8 2738 // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 2739 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 2740 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 2741 // CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 2742 // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 2743 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 2744 // CHECK9: omp_offload.failed: 2745 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79([10 x i32]* [[A]]) #[[ATTR3]] 2746 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] 2747 // CHECK9: omp_offload.cont: 2748 // CHECK9-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 2749 // CHECK9-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]** 2750 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 8 2751 // CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 2752 // CHECK9-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]** 2753 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 8 2754 // CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0 2755 // CHECK9-NEXT: store i8* null, i8** [[TMP13]], align 8 2756 // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 2757 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 2758 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 2759 // CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 2760 // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 2761 // CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 2762 // CHECK9: omp_offload.failed5: 2763 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84([10 x i32]* [[A]]) #[[ATTR3]] 2764 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT6]] 2765 // CHECK9: omp_offload.cont6: 2766 // CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 2767 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to [10 x i32]** 2768 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP19]], align 8 2769 // CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 2770 // CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [10 x i32]** 2771 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP21]], align 8 2772 // CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0 2773 // CHECK9-NEXT: store i8* null, i8** [[TMP22]], align 8 2774 // CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 2775 // CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 2776 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 2777 // CHECK9-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 2778 // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 2779 // CHECK9-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] 2780 // CHECK9: omp_offload.failed11: 2781 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89([10 x i32]* [[A]]) #[[ATTR3]] 2782 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT12]] 2783 // CHECK9: omp_offload.cont12: 2784 // CHECK9-NEXT: ret i32 0 2785 // 2786 // 2787 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79 2788 // CHECK9-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 2789 // CHECK9-NEXT: entry: 2790 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 2791 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 2792 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 2793 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 2794 // CHECK9-NEXT: ret void 2795 // 2796 // 2797 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5 2798 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 2799 // CHECK9-NEXT: entry: 2800 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2801 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2802 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 2803 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2804 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 2805 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2806 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2807 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2808 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2809 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 2810 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2811 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2812 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 2813 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 2814 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2815 // CHECK9-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 2816 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2817 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2818 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2819 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 2820 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2821 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2822 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 2823 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2824 // CHECK9: cond.true: 2825 // CHECK9-NEXT: br label [[COND_END:%.*]] 2826 // CHECK9: cond.false: 2827 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2828 // CHECK9-NEXT: br label [[COND_END]] 2829 // CHECK9: cond.end: 2830 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 2831 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2832 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2833 // CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 2834 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2835 // CHECK9: omp.inner.for.cond: 2836 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 2837 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21 2838 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 2839 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2840 // CHECK9: omp.inner.for.body: 2841 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 2842 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 2843 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2844 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21 2845 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !21 2846 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 2847 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]] 2848 // CHECK9-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21 2849 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2850 // CHECK9: omp.body.continue: 2851 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2852 // CHECK9: omp.inner.for.inc: 2853 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 2854 // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 2855 // CHECK9-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 2856 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] 2857 // CHECK9: omp.inner.for.end: 2858 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2859 // CHECK9: omp.loop.exit: 2860 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 2861 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2862 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 2863 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2864 // CHECK9: .omp.final.then: 2865 // CHECK9-NEXT: store i32 10, i32* [[I]], align 4 2866 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 2867 // CHECK9: .omp.final.done: 2868 // CHECK9-NEXT: ret void 2869 // 2870 // 2871 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84 2872 // CHECK9-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 2873 // CHECK9-NEXT: entry: 2874 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 2875 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 2876 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 2877 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 2878 // CHECK9-NEXT: ret void 2879 // 2880 // 2881 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 2882 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 2883 // CHECK9-NEXT: entry: 2884 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2885 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2886 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 2887 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2888 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 2889 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2890 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2891 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2892 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2893 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 2894 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2895 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2896 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 2897 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 2898 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2899 // CHECK9-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 2900 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2901 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2902 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2903 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 2904 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2905 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2906 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 2907 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2908 // CHECK9: cond.true: 2909 // CHECK9-NEXT: br label [[COND_END:%.*]] 2910 // CHECK9: cond.false: 2911 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2912 // CHECK9-NEXT: br label [[COND_END]] 2913 // CHECK9: cond.end: 2914 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 2915 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2916 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2917 // CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 2918 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2919 // CHECK9: omp.inner.for.cond: 2920 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 2921 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24 2922 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 2923 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2924 // CHECK9: omp.inner.for.body: 2925 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 2926 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 2927 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2928 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24 2929 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !24 2930 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 2931 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]] 2932 // CHECK9-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24 2933 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2934 // CHECK9: omp.body.continue: 2935 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2936 // CHECK9: omp.inner.for.inc: 2937 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 2938 // CHECK9-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 2939 // CHECK9-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 2940 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] 2941 // CHECK9: omp.inner.for.end: 2942 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2943 // CHECK9: omp.loop.exit: 2944 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 2945 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2946 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 2947 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2948 // CHECK9: .omp.final.then: 2949 // CHECK9-NEXT: store i32 10, i32* [[I]], align 4 2950 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 2951 // CHECK9: .omp.final.done: 2952 // CHECK9-NEXT: ret void 2953 // 2954 // 2955 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89 2956 // CHECK9-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 2957 // CHECK9-NEXT: entry: 2958 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 2959 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 2960 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 2961 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 2962 // CHECK9-NEXT: ret void 2963 // 2964 // 2965 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 2966 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 2967 // CHECK9-NEXT: entry: 2968 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2969 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2970 // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 2971 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2972 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 2973 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2974 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2975 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2976 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2977 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 2978 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2979 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2980 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 2981 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 2982 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2983 // CHECK9-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 2984 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2985 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2986 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2987 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 2988 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 10) 2989 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 2990 // CHECK9: omp.dispatch.cond: 2991 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2992 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 2993 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2994 // CHECK9: cond.true: 2995 // CHECK9-NEXT: br label [[COND_END:%.*]] 2996 // CHECK9: cond.false: 2997 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2998 // CHECK9-NEXT: br label [[COND_END]] 2999 // CHECK9: cond.end: 3000 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 3001 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3002 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3003 // CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 3004 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3005 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3006 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 3007 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 3008 // CHECK9: omp.dispatch.body: 3009 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3010 // CHECK9: omp.inner.for.cond: 3011 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3012 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27 3013 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 3014 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3015 // CHECK9: omp.inner.for.body: 3016 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3017 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 3018 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3019 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27 3020 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !27 3021 // CHECK9-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 3022 // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]] 3023 // CHECK9-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27 3024 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3025 // CHECK9: omp.body.continue: 3026 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3027 // CHECK9: omp.inner.for.inc: 3028 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3029 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 3030 // CHECK9-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3031 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] 3032 // CHECK9: omp.inner.for.end: 3033 // CHECK9-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 3034 // CHECK9: omp.dispatch.inc: 3035 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3036 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 3037 // CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 3038 // CHECK9-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4 3039 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3040 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 3041 // CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] 3042 // CHECK9-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4 3043 // CHECK9-NEXT: br label [[OMP_DISPATCH_COND]] 3044 // CHECK9: omp.dispatch.end: 3045 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 3046 // CHECK9-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3047 // CHECK9-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 3048 // CHECK9-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3049 // CHECK9: .omp.final.then: 3050 // CHECK9-NEXT: store i32 10, i32* [[I]], align 4 3051 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 3052 // CHECK9: .omp.final.done: 3053 // CHECK9-NEXT: ret void 3054 // 3055 // 3056 // CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 3057 // CHECK9-SAME: () #[[ATTR5:[0-9]+]] { 3058 // CHECK9-NEXT: entry: 3059 // CHECK9-NEXT: call void @__tgt_register_requires(i64 1) 3060 // CHECK9-NEXT: ret void 3061 // 3062 // 3063 // CHECK10-LABEL: define {{[^@]+}}@main 3064 // CHECK10-SAME: (i32 noundef signext [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] { 3065 // CHECK10-NEXT: entry: 3066 // CHECK10-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 3067 // CHECK10-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 3068 // CHECK10-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 3069 // CHECK10-NEXT: [[N:%.*]] = alloca i32, align 4 3070 // CHECK10-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 3071 // CHECK10-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 3072 // CHECK10-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8 3073 // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 3074 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 3075 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 3076 // CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 3077 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 3078 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3079 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 3080 // CHECK10-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8 3081 // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 3082 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 3083 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 3084 // CHECK10-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 3085 // CHECK10-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 3086 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 3087 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 3088 // CHECK10-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 3089 // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [3 x i8*], align 8 3090 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [3 x i8*], align 8 3091 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [3 x i8*], align 8 3092 // CHECK10-NEXT: [[DOTOFFLOAD_SIZES23:%.*]] = alloca [3 x i64], align 8 3093 // CHECK10-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 3094 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 3095 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 3096 // CHECK10-NEXT: store i32 0, i32* [[RETVAL]], align 4 3097 // CHECK10-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 3098 // CHECK10-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 3099 // CHECK10-NEXT: store i32 100, i32* [[N]], align 4 3100 // CHECK10-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4 3101 // CHECK10-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 3102 // CHECK10-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 3103 // CHECK10-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 3104 // CHECK10-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 3105 // CHECK10-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 3106 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[N]], align 4 3107 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[N_CASTED]] to i32* 3108 // CHECK10-NEXT: store i32 [[TMP3]], i32* [[CONV]], align 4 3109 // CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[N_CASTED]], align 8 3110 // CHECK10-NEXT: [[TMP5:%.*]] = mul nuw i64 [[TMP1]], 4 3111 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3112 // CHECK10-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 3113 // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP7]], align 8 3114 // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3115 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 3116 // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 3117 // CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 3118 // CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 3119 // CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 3120 // CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 3121 // CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 3122 // CHECK10-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* 3123 // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 3124 // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 3125 // CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* 3126 // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 3127 // CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 3128 // CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 3129 // CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 3130 // CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 3131 // CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 3132 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** 3133 // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 3134 // CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 3135 // CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** 3136 // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 3137 // CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 3138 // CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 3139 // CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 3140 // CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 3141 // CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3142 // CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3143 // CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 3144 // CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 3145 // CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 3146 // CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3147 // CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 3148 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 3149 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 3150 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 3151 // CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3152 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 3153 // CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 3154 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) 3155 // CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 3156 // CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 3157 // CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 3158 // CHECK10: omp_offload.failed: 3159 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] 3160 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] 3161 // CHECK10: omp_offload.cont: 3162 // CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 3163 // CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* 3164 // CHECK10-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 3165 // CHECK10-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 3166 // CHECK10-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 3167 // CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 3168 // CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* 3169 // CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 3170 // CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 3171 // CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* 3172 // CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 3173 // CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 3174 // CHECK10-NEXT: store i64 4, i64* [[TMP40]], align 8 3175 // CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 3176 // CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 3177 // CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 3178 // CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* 3179 // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 3180 // CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 3181 // CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* 3182 // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 3183 // CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 3184 // CHECK10-NEXT: store i64 8, i64* [[TMP46]], align 8 3185 // CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 3186 // CHECK10-NEXT: store i8* null, i8** [[TMP47]], align 8 3187 // CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 3188 // CHECK10-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** 3189 // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 3190 // CHECK10-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 3191 // CHECK10-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** 3192 // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 3193 // CHECK10-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 3194 // CHECK10-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 3195 // CHECK10-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 3196 // CHECK10-NEXT: store i8* null, i8** [[TMP53]], align 8 3197 // CHECK10-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 3198 // CHECK10-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 3199 // CHECK10-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 3200 // CHECK10-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 3201 // CHECK10-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 3202 // CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 3203 // CHECK10-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 3204 // CHECK10-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 3205 // CHECK10-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 3206 // CHECK10-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 3207 // CHECK10-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 3208 // CHECK10-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 3209 // CHECK10-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 3210 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP60]]) 3211 // CHECK10-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 3212 // CHECK10-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 3213 // CHECK10-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] 3214 // CHECK10: omp_offload.failed16: 3215 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] 3216 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT17]] 3217 // CHECK10: omp_offload.cont17: 3218 // CHECK10-NEXT: [[TMP63:%.*]] = load i32, i32* [[N]], align 4 3219 // CHECK10-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* 3220 // CHECK10-NEXT: store i32 [[TMP63]], i32* [[CONV19]], align 4 3221 // CHECK10-NEXT: [[TMP64:%.*]] = load i64, i64* [[N_CASTED18]], align 8 3222 // CHECK10-NEXT: [[TMP65:%.*]] = mul nuw i64 [[TMP1]], 4 3223 // CHECK10-NEXT: [[TMP66:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 3224 // CHECK10-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* 3225 // CHECK10-NEXT: store i64 [[TMP64]], i64* [[TMP67]], align 8 3226 // CHECK10-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 3227 // CHECK10-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* 3228 // CHECK10-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 3229 // CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 3230 // CHECK10-NEXT: store i64 4, i64* [[TMP70]], align 8 3231 // CHECK10-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 3232 // CHECK10-NEXT: store i8* null, i8** [[TMP71]], align 8 3233 // CHECK10-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 3234 // CHECK10-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* 3235 // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 3236 // CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 3237 // CHECK10-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* 3238 // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP75]], align 8 3239 // CHECK10-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 1 3240 // CHECK10-NEXT: store i64 8, i64* [[TMP76]], align 8 3241 // CHECK10-NEXT: [[TMP77:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 3242 // CHECK10-NEXT: store i8* null, i8** [[TMP77]], align 8 3243 // CHECK10-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 3244 // CHECK10-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32** 3245 // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP79]], align 8 3246 // CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 3247 // CHECK10-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** 3248 // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 8 3249 // CHECK10-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 2 3250 // CHECK10-NEXT: store i64 [[TMP65]], i64* [[TMP82]], align 8 3251 // CHECK10-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 3252 // CHECK10-NEXT: store i8* null, i8** [[TMP83]], align 8 3253 // CHECK10-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 3254 // CHECK10-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 3255 // CHECK10-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 3256 // CHECK10-NEXT: [[TMP87:%.*]] = load i32, i32* [[N]], align 4 3257 // CHECK10-NEXT: store i32 [[TMP87]], i32* [[DOTCAPTURE_EXPR_25]], align 4 3258 // CHECK10-NEXT: [[TMP88:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 3259 // CHECK10-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP88]], 0 3260 // CHECK10-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 3261 // CHECK10-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 3262 // CHECK10-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 3263 // CHECK10-NEXT: [[TMP89:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 3264 // CHECK10-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP89]], 1 3265 // CHECK10-NEXT: [[TMP90:%.*]] = zext i32 [[ADD30]] to i64 3266 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP90]]) 3267 // CHECK10-NEXT: [[TMP91:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP84]], i8** [[TMP85]], i64* [[TMP86]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 3268 // CHECK10-NEXT: [[TMP92:%.*]] = icmp ne i32 [[TMP91]], 0 3269 // CHECK10-NEXT: br i1 [[TMP92]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] 3270 // CHECK10: omp_offload.failed31: 3271 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i64 [[TMP64]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] 3272 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT32]] 3273 // CHECK10: omp_offload.cont32: 3274 // CHECK10-NEXT: [[TMP93:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 3275 // CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP93]]) 3276 // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 3277 // CHECK10-NEXT: [[TMP94:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 3278 // CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP94]]) 3279 // CHECK10-NEXT: [[TMP95:%.*]] = load i32, i32* [[RETVAL]], align 4 3280 // CHECK10-NEXT: ret i32 [[TMP95]] 3281 // 3282 // 3283 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 3284 // CHECK10-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] { 3285 // CHECK10-NEXT: entry: 3286 // CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 3287 // CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 3288 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 3289 // CHECK10-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8 3290 // CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 3291 // CHECK10-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 3292 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* 3293 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 3294 // CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 3295 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) 3296 // CHECK10-NEXT: ret void 3297 // 3298 // 3299 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined. 3300 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 3301 // CHECK10-NEXT: entry: 3302 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3303 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3304 // CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 3305 // CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 3306 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 3307 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3308 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 3309 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3310 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 3311 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 3312 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3313 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3314 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3315 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3316 // CHECK10-NEXT: [[I3:%.*]] = alloca i32, align 4 3317 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3318 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3319 // CHECK10-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 3320 // CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 3321 // CHECK10-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 3322 // CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 3323 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 3324 // CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8 3325 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 3326 // CHECK10-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 3327 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3328 // CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 3329 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 3330 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 3331 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 3332 // CHECK10-NEXT: store i32 0, i32* [[I]], align 4 3333 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3334 // CHECK10-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 3335 // CHECK10-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 3336 // CHECK10: omp.precond.then: 3337 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3338 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3339 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 3340 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3341 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3342 // CHECK10-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3343 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 3344 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3345 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3346 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3347 // CHECK10-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] 3348 // CHECK10-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3349 // CHECK10: cond.true: 3350 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3351 // CHECK10-NEXT: br label [[COND_END:%.*]] 3352 // CHECK10: cond.false: 3353 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3354 // CHECK10-NEXT: br label [[COND_END]] 3355 // CHECK10: cond.end: 3356 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 3357 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3358 // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3359 // CHECK10-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 3360 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3361 // CHECK10: omp.inner.for.cond: 3362 // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 3363 // CHECK10-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !9 3364 // CHECK10-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 3365 // CHECK10-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3366 // CHECK10: omp.inner.for.body: 3367 // CHECK10-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 3368 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 3369 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3370 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !9 3371 // CHECK10-NEXT: [[TMP17:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !9 3372 // CHECK10-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64 3373 // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[IDXPROM]] 3374 // CHECK10-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !9 3375 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3376 // CHECK10: omp.body.continue: 3377 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3378 // CHECK10: omp.inner.for.inc: 3379 // CHECK10-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 3380 // CHECK10-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 3381 // CHECK10-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 3382 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] 3383 // CHECK10: omp.inner.for.end: 3384 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3385 // CHECK10: omp.loop.exit: 3386 // CHECK10-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3387 // CHECK10-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4 3388 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]]) 3389 // CHECK10-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3390 // CHECK10-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 3391 // CHECK10-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3392 // CHECK10: .omp.final.then: 3393 // CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3394 // CHECK10-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 3395 // CHECK10-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 3396 // CHECK10-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 3397 // CHECK10-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]] 3398 // CHECK10-NEXT: store i32 [[ADD10]], i32* [[I3]], align 4 3399 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 3400 // CHECK10: .omp.final.done: 3401 // CHECK10-NEXT: br label [[OMP_PRECOND_END]] 3402 // CHECK10: omp.precond.end: 3403 // CHECK10-NEXT: ret void 3404 // 3405 // 3406 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105 3407 // CHECK10-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 3408 // CHECK10-NEXT: entry: 3409 // CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 3410 // CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 3411 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 3412 // CHECK10-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8 3413 // CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 3414 // CHECK10-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 3415 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* 3416 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 3417 // CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 3418 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) 3419 // CHECK10-NEXT: ret void 3420 // 3421 // 3422 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..1 3423 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 3424 // CHECK10-NEXT: entry: 3425 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3426 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3427 // CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 3428 // CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 3429 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 3430 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3431 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 3432 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3433 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 3434 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 3435 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3436 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3437 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3438 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3439 // CHECK10-NEXT: [[I3:%.*]] = alloca i32, align 4 3440 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3441 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3442 // CHECK10-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 3443 // CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 3444 // CHECK10-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 3445 // CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 3446 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 3447 // CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8 3448 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 3449 // CHECK10-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 3450 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3451 // CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 3452 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 3453 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 3454 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 3455 // CHECK10-NEXT: store i32 0, i32* [[I]], align 4 3456 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3457 // CHECK10-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 3458 // CHECK10-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 3459 // CHECK10: omp.precond.then: 3460 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3461 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3462 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 3463 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3464 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3465 // CHECK10-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3466 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 3467 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3468 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3469 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3470 // CHECK10-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] 3471 // CHECK10-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3472 // CHECK10: cond.true: 3473 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3474 // CHECK10-NEXT: br label [[COND_END:%.*]] 3475 // CHECK10: cond.false: 3476 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3477 // CHECK10-NEXT: br label [[COND_END]] 3478 // CHECK10: cond.end: 3479 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 3480 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3481 // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3482 // CHECK10-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 3483 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3484 // CHECK10: omp.inner.for.cond: 3485 // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 3486 // CHECK10-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15 3487 // CHECK10-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 3488 // CHECK10-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3489 // CHECK10: omp.inner.for.body: 3490 // CHECK10-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 3491 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 3492 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3493 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !15 3494 // CHECK10-NEXT: [[TMP17:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !15 3495 // CHECK10-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64 3496 // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[IDXPROM]] 3497 // CHECK10-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !15 3498 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3499 // CHECK10: omp.body.continue: 3500 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3501 // CHECK10: omp.inner.for.inc: 3502 // CHECK10-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 3503 // CHECK10-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 3504 // CHECK10-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 3505 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 3506 // CHECK10: omp.inner.for.end: 3507 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3508 // CHECK10: omp.loop.exit: 3509 // CHECK10-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3510 // CHECK10-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4 3511 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]]) 3512 // CHECK10-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3513 // CHECK10-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 3514 // CHECK10-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3515 // CHECK10: .omp.final.then: 3516 // CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3517 // CHECK10-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 3518 // CHECK10-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 3519 // CHECK10-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 3520 // CHECK10-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]] 3521 // CHECK10-NEXT: store i32 [[ADD10]], i32* [[I3]], align 4 3522 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 3523 // CHECK10: .omp.final.done: 3524 // CHECK10-NEXT: br label [[OMP_PRECOND_END]] 3525 // CHECK10: omp.precond.end: 3526 // CHECK10-NEXT: ret void 3527 // 3528 // 3529 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110 3530 // CHECK10-SAME: (i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 3531 // CHECK10-NEXT: entry: 3532 // CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 3533 // CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 3534 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 3535 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 3536 // CHECK10-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 3537 // CHECK10-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8 3538 // CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 3539 // CHECK10-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 3540 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* 3541 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 3542 // CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 3543 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[CONV]], align 4 3544 // CHECK10-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4 3545 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 3546 // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* 3547 // CHECK10-NEXT: store i32 [[TMP3]], i32* [[CONV1]], align 4 3548 // CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 3549 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) 3550 // CHECK10-NEXT: ret void 3551 // 3552 // 3553 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..3 3554 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { 3555 // CHECK10-NEXT: entry: 3556 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3557 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3558 // CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 8 3559 // CHECK10-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 3560 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 8 3561 // CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 3562 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3563 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 3564 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 3565 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 3566 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 3567 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3568 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3569 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3570 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3571 // CHECK10-NEXT: [[I4:%.*]] = alloca i32, align 4 3572 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3573 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3574 // CHECK10-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 8 3575 // CHECK10-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 3576 // CHECK10-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8 3577 // CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 3578 // CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 8 3579 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 3580 // CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 8 3581 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i32* 3582 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 3583 // CHECK10-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_1]], align 4 3584 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3585 // CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 3586 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 3587 // CHECK10-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 3588 // CHECK10-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 3589 // CHECK10-NEXT: store i32 0, i32* [[I]], align 4 3590 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3591 // CHECK10-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 3592 // CHECK10-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 3593 // CHECK10: omp.precond.then: 3594 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3595 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 3596 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 3597 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3598 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3599 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[CONV]], align 4 3600 // CHECK10-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3601 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4 3602 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]]) 3603 // CHECK10-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 3604 // CHECK10: omp.dispatch.cond: 3605 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3606 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 3607 // CHECK10-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]] 3608 // CHECK10-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3609 // CHECK10: cond.true: 3610 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 3611 // CHECK10-NEXT: br label [[COND_END:%.*]] 3612 // CHECK10: cond.false: 3613 // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3614 // CHECK10-NEXT: br label [[COND_END]] 3615 // CHECK10: cond.end: 3616 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ] 3617 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3618 // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3619 // CHECK10-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4 3620 // CHECK10-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3621 // CHECK10-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3622 // CHECK10-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] 3623 // CHECK10-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 3624 // CHECK10: omp.dispatch.body: 3625 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3626 // CHECK10: omp.inner.for.cond: 3627 // CHECK10-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 3628 // CHECK10-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18 3629 // CHECK10-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 3630 // CHECK10-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3631 // CHECK10: omp.inner.for.body: 3632 // CHECK10-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 3633 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1 3634 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3635 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !18 3636 // CHECK10-NEXT: [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !18 3637 // CHECK10-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64 3638 // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[IDXPROM]] 3639 // CHECK10-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !18 3640 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3641 // CHECK10: omp.body.continue: 3642 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3643 // CHECK10: omp.inner.for.inc: 3644 // CHECK10-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 3645 // CHECK10-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP21]], 1 3646 // CHECK10-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 3647 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] 3648 // CHECK10: omp.inner.for.end: 3649 // CHECK10-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 3650 // CHECK10: omp.dispatch.inc: 3651 // CHECK10-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3652 // CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 3653 // CHECK10-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP23]] 3654 // CHECK10-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_LB]], align 4 3655 // CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3656 // CHECK10-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 3657 // CHECK10-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]] 3658 // CHECK10-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_UB]], align 4 3659 // CHECK10-NEXT: br label [[OMP_DISPATCH_COND]] 3660 // CHECK10: omp.dispatch.end: 3661 // CHECK10-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3662 // CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4 3663 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]]) 3664 // CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3665 // CHECK10-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 3666 // CHECK10-NEXT: br i1 [[TMP29]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3667 // CHECK10: .omp.final.then: 3668 // CHECK10-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 3669 // CHECK10-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP30]], 0 3670 // CHECK10-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 3671 // CHECK10-NEXT: [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1 3672 // CHECK10-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 3673 // CHECK10-NEXT: store i32 [[ADD14]], i32* [[I4]], align 4 3674 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 3675 // CHECK10: .omp.final.done: 3676 // CHECK10-NEXT: br label [[OMP_PRECOND_END]] 3677 // CHECK10: omp.precond.end: 3678 // CHECK10-NEXT: ret void 3679 // 3680 // 3681 // CHECK10-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_ 3682 // CHECK10-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat { 3683 // CHECK10-NEXT: entry: 3684 // CHECK10-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 3685 // CHECK10-NEXT: [[A:%.*]] = alloca [10 x i32], align 4 3686 // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 3687 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 3688 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 3689 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 3690 // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8 3691 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8 3692 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8 3693 // CHECK10-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 3694 // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 8 3695 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 8 3696 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 8 3697 // CHECK10-NEXT: [[_TMP10:%.*]] = alloca i32, align 4 3698 // CHECK10-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 3699 // CHECK10-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3700 // CHECK10-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]** 3701 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 8 3702 // CHECK10-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3703 // CHECK10-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]** 3704 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 8 3705 // CHECK10-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 3706 // CHECK10-NEXT: store i8* null, i8** [[TMP4]], align 8 3707 // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3708 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3709 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 3710 // CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 3711 // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 3712 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 3713 // CHECK10: omp_offload.failed: 3714 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79([10 x i32]* [[A]]) #[[ATTR3]] 3715 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] 3716 // CHECK10: omp_offload.cont: 3717 // CHECK10-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 3718 // CHECK10-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]** 3719 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 8 3720 // CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 3721 // CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]** 3722 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 8 3723 // CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0 3724 // CHECK10-NEXT: store i8* null, i8** [[TMP13]], align 8 3725 // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 3726 // CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 3727 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 3728 // CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 3729 // CHECK10-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 3730 // CHECK10-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 3731 // CHECK10: omp_offload.failed5: 3732 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84([10 x i32]* [[A]]) #[[ATTR3]] 3733 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT6]] 3734 // CHECK10: omp_offload.cont6: 3735 // CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 3736 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to [10 x i32]** 3737 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP19]], align 8 3738 // CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 3739 // CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [10 x i32]** 3740 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP21]], align 8 3741 // CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i64 0, i64 0 3742 // CHECK10-NEXT: store i8* null, i8** [[TMP22]], align 8 3743 // CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 3744 // CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 3745 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 3746 // CHECK10-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 3747 // CHECK10-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 3748 // CHECK10-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] 3749 // CHECK10: omp_offload.failed11: 3750 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89([10 x i32]* [[A]]) #[[ATTR3]] 3751 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT12]] 3752 // CHECK10: omp_offload.cont12: 3753 // CHECK10-NEXT: ret i32 0 3754 // 3755 // 3756 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79 3757 // CHECK10-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 3758 // CHECK10-NEXT: entry: 3759 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 3760 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 3761 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 3762 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 3763 // CHECK10-NEXT: ret void 3764 // 3765 // 3766 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 3767 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 3768 // CHECK10-NEXT: entry: 3769 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3770 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3771 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 3772 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3773 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 3774 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3775 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3776 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3777 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3778 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 3779 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3780 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3781 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 3782 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 3783 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3784 // CHECK10-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 3785 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3786 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3787 // CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3788 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 3789 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3790 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3791 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 3792 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3793 // CHECK10: cond.true: 3794 // CHECK10-NEXT: br label [[COND_END:%.*]] 3795 // CHECK10: cond.false: 3796 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3797 // CHECK10-NEXT: br label [[COND_END]] 3798 // CHECK10: cond.end: 3799 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 3800 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3801 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3802 // CHECK10-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 3803 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3804 // CHECK10: omp.inner.for.cond: 3805 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 3806 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !21 3807 // CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 3808 // CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3809 // CHECK10: omp.inner.for.body: 3810 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 3811 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 3812 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3813 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !21 3814 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !21 3815 // CHECK10-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 3816 // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]] 3817 // CHECK10-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !21 3818 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3819 // CHECK10: omp.body.continue: 3820 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3821 // CHECK10: omp.inner.for.inc: 3822 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 3823 // CHECK10-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 3824 // CHECK10-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !21 3825 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP22:![0-9]+]] 3826 // CHECK10: omp.inner.for.end: 3827 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3828 // CHECK10: omp.loop.exit: 3829 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 3830 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3831 // CHECK10-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 3832 // CHECK10-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3833 // CHECK10: .omp.final.then: 3834 // CHECK10-NEXT: store i32 10, i32* [[I]], align 4 3835 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 3836 // CHECK10: .omp.final.done: 3837 // CHECK10-NEXT: ret void 3838 // 3839 // 3840 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84 3841 // CHECK10-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 3842 // CHECK10-NEXT: entry: 3843 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 3844 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 3845 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 3846 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 3847 // CHECK10-NEXT: ret void 3848 // 3849 // 3850 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 3851 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 3852 // CHECK10-NEXT: entry: 3853 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3854 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3855 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 3856 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3857 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 3858 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3859 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3860 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3861 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3862 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 3863 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3864 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3865 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 3866 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 3867 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3868 // CHECK10-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 3869 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3870 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3871 // CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3872 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 3873 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3874 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3875 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 3876 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3877 // CHECK10: cond.true: 3878 // CHECK10-NEXT: br label [[COND_END:%.*]] 3879 // CHECK10: cond.false: 3880 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3881 // CHECK10-NEXT: br label [[COND_END]] 3882 // CHECK10: cond.end: 3883 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 3884 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3885 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3886 // CHECK10-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 3887 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3888 // CHECK10: omp.inner.for.cond: 3889 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 3890 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !24 3891 // CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 3892 // CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3893 // CHECK10: omp.inner.for.body: 3894 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 3895 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 3896 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3897 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !24 3898 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !24 3899 // CHECK10-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64 3900 // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]] 3901 // CHECK10-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !24 3902 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3903 // CHECK10: omp.body.continue: 3904 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3905 // CHECK10: omp.inner.for.inc: 3906 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 3907 // CHECK10-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 3908 // CHECK10-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 3909 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] 3910 // CHECK10: omp.inner.for.end: 3911 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3912 // CHECK10: omp.loop.exit: 3913 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 3914 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3915 // CHECK10-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 3916 // CHECK10-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3917 // CHECK10: .omp.final.then: 3918 // CHECK10-NEXT: store i32 10, i32* [[I]], align 4 3919 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 3920 // CHECK10: .omp.final.done: 3921 // CHECK10-NEXT: ret void 3922 // 3923 // 3924 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89 3925 // CHECK10-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 3926 // CHECK10-NEXT: entry: 3927 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 3928 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 3929 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 3930 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 3931 // CHECK10-NEXT: ret void 3932 // 3933 // 3934 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 3935 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 3936 // CHECK10-NEXT: entry: 3937 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3938 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3939 // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 3940 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3941 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 3942 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3943 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3944 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3945 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3946 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 3947 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3948 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3949 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 3950 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 3951 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3952 // CHECK10-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 3953 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3954 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3955 // CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3956 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 3957 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 10) 3958 // CHECK10-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 3959 // CHECK10: omp.dispatch.cond: 3960 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3961 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 3962 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3963 // CHECK10: cond.true: 3964 // CHECK10-NEXT: br label [[COND_END:%.*]] 3965 // CHECK10: cond.false: 3966 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3967 // CHECK10-NEXT: br label [[COND_END]] 3968 // CHECK10: cond.end: 3969 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 3970 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3971 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3972 // CHECK10-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 3973 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3974 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3975 // CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 3976 // CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 3977 // CHECK10: omp.dispatch.body: 3978 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3979 // CHECK10: omp.inner.for.cond: 3980 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3981 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27 3982 // CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 3983 // CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3984 // CHECK10: omp.inner.for.body: 3985 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3986 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 3987 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3988 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27 3989 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !27 3990 // CHECK10-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 3991 // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]] 3992 // CHECK10-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !27 3993 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3994 // CHECK10: omp.body.continue: 3995 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3996 // CHECK10: omp.inner.for.inc: 3997 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 3998 // CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 3999 // CHECK10-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 4000 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] 4001 // CHECK10: omp.inner.for.end: 4002 // CHECK10-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 4003 // CHECK10: omp.dispatch.inc: 4004 // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4005 // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 4006 // CHECK10-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 4007 // CHECK10-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4 4008 // CHECK10-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4009 // CHECK10-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 4010 // CHECK10-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] 4011 // CHECK10-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4 4012 // CHECK10-NEXT: br label [[OMP_DISPATCH_COND]] 4013 // CHECK10: omp.dispatch.end: 4014 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 4015 // CHECK10-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4016 // CHECK10-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 4017 // CHECK10-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4018 // CHECK10: .omp.final.then: 4019 // CHECK10-NEXT: store i32 10, i32* [[I]], align 4 4020 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 4021 // CHECK10: .omp.final.done: 4022 // CHECK10-NEXT: ret void 4023 // 4024 // 4025 // CHECK10-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 4026 // CHECK10-SAME: () #[[ATTR5:[0-9]+]] { 4027 // CHECK10-NEXT: entry: 4028 // CHECK10-NEXT: call void @__tgt_register_requires(i64 1) 4029 // CHECK10-NEXT: ret void 4030 // 4031 // 4032 // CHECK11-LABEL: define {{[^@]+}}@main 4033 // CHECK11-SAME: (i32 noundef [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] { 4034 // CHECK11-NEXT: entry: 4035 // CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 4036 // CHECK11-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 4037 // CHECK11-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 4 4038 // CHECK11-NEXT: [[N:%.*]] = alloca i32, align 4 4039 // CHECK11-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 4040 // CHECK11-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 4041 // CHECK11-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4 4042 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 4043 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 4044 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 4045 // CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 4046 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 4047 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 4048 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 4049 // CHECK11-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4 4050 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 4051 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 4052 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 4053 // CHECK11-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 4054 // CHECK11-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 4055 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 4056 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 4057 // CHECK11-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 4058 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [3 x i8*], align 4 4059 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [3 x i8*], align 4 4060 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [3 x i8*], align 4 4061 // CHECK11-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [3 x i64], align 4 4062 // CHECK11-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 4063 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 4064 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 4065 // CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4 4066 // CHECK11-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 4067 // CHECK11-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 4068 // CHECK11-NEXT: store i32 100, i32* [[N]], align 4 4069 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4 4070 // CHECK11-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() 4071 // CHECK11-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4 4072 // CHECK11-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 4073 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4 4074 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[N]], align 4 4075 // CHECK11-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4 4076 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4 4077 // CHECK11-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4 4078 // CHECK11-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64 4079 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4080 // CHECK11-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32* 4081 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP7]], align 4 4082 // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4083 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* 4084 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 4085 // CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 4086 // CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 4087 // CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 4088 // CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 4089 // CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 4090 // CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 4091 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 4092 // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 4093 // CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 4094 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 4095 // CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 4096 // CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 4097 // CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 4098 // CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 4099 // CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 4100 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** 4101 // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 4102 // CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 4103 // CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** 4104 // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 4105 // CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 4106 // CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 4107 // CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 4108 // CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 4109 // CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4110 // CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4111 // CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 4112 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 4113 // CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 4114 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4115 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 4116 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 4117 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 4118 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 4119 // CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4120 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 4121 // CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 4122 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) 4123 // CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 4124 // CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 4125 // CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4126 // CHECK11: omp_offload.failed: 4127 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] 4128 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] 4129 // CHECK11: omp_offload.cont: 4130 // CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 4131 // CHECK11-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 4132 // CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 4133 // CHECK11-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 4134 // CHECK11-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 4135 // CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 4136 // CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* 4137 // CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 4138 // CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 4139 // CHECK11-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* 4140 // CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 4141 // CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 4142 // CHECK11-NEXT: store i64 4, i64* [[TMP41]], align 4 4143 // CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 4144 // CHECK11-NEXT: store i8* null, i8** [[TMP42]], align 4 4145 // CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 4146 // CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* 4147 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 4148 // CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 4149 // CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* 4150 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 4151 // CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 4152 // CHECK11-NEXT: store i64 4, i64* [[TMP47]], align 4 4153 // CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 4154 // CHECK11-NEXT: store i8* null, i8** [[TMP48]], align 4 4155 // CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 4156 // CHECK11-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** 4157 // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 4158 // CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 4159 // CHECK11-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** 4160 // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 4161 // CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 4162 // CHECK11-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 4163 // CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 4164 // CHECK11-NEXT: store i8* null, i8** [[TMP54]], align 4 4165 // CHECK11-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 4166 // CHECK11-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 4167 // CHECK11-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 4168 // CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 4169 // CHECK11-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 4170 // CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 4171 // CHECK11-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 4172 // CHECK11-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 4173 // CHECK11-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 4174 // CHECK11-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 4175 // CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 4176 // CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 4177 // CHECK11-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 4178 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP61]]) 4179 // CHECK11-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 4180 // CHECK11-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 4181 // CHECK11-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] 4182 // CHECK11: omp_offload.failed15: 4183 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] 4184 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT16]] 4185 // CHECK11: omp_offload.cont16: 4186 // CHECK11-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 4187 // CHECK11-NEXT: store i32 [[TMP64]], i32* [[N_CASTED17]], align 4 4188 // CHECK11-NEXT: [[TMP65:%.*]] = load i32, i32* [[N_CASTED17]], align 4 4189 // CHECK11-NEXT: [[TMP66:%.*]] = mul nuw i32 [[TMP0]], 4 4190 // CHECK11-NEXT: [[TMP67:%.*]] = sext i32 [[TMP66]] to i64 4191 // CHECK11-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 4192 // CHECK11-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* 4193 // CHECK11-NEXT: store i32 [[TMP65]], i32* [[TMP69]], align 4 4194 // CHECK11-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 4195 // CHECK11-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* 4196 // CHECK11-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 4197 // CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 4198 // CHECK11-NEXT: store i64 4, i64* [[TMP72]], align 4 4199 // CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 4200 // CHECK11-NEXT: store i8* null, i8** [[TMP73]], align 4 4201 // CHECK11-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 4202 // CHECK11-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* 4203 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 4204 // CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 4205 // CHECK11-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* 4206 // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP77]], align 4 4207 // CHECK11-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 4208 // CHECK11-NEXT: store i64 4, i64* [[TMP78]], align 4 4209 // CHECK11-NEXT: [[TMP79:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 4210 // CHECK11-NEXT: store i8* null, i8** [[TMP79]], align 4 4211 // CHECK11-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 4212 // CHECK11-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** 4213 // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 4 4214 // CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 4215 // CHECK11-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32** 4216 // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP83]], align 4 4217 // CHECK11-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 4218 // CHECK11-NEXT: store i64 [[TMP67]], i64* [[TMP84]], align 4 4219 // CHECK11-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 4220 // CHECK11-NEXT: store i8* null, i8** [[TMP85]], align 4 4221 // CHECK11-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 4222 // CHECK11-NEXT: [[TMP87:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 4223 // CHECK11-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 4224 // CHECK11-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 4225 // CHECK11-NEXT: store i32 [[TMP89]], i32* [[DOTCAPTURE_EXPR_23]], align 4 4226 // CHECK11-NEXT: [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 4227 // CHECK11-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP90]], 0 4228 // CHECK11-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 4229 // CHECK11-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 4230 // CHECK11-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 4231 // CHECK11-NEXT: [[TMP91:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 4232 // CHECK11-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP91]], 1 4233 // CHECK11-NEXT: [[TMP92:%.*]] = zext i32 [[ADD28]] to i64 4234 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP92]]) 4235 // CHECK11-NEXT: [[TMP93:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP86]], i8** [[TMP87]], i64* [[TMP88]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 4236 // CHECK11-NEXT: [[TMP94:%.*]] = icmp ne i32 [[TMP93]], 0 4237 // CHECK11-NEXT: br i1 [[TMP94]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] 4238 // CHECK11: omp_offload.failed29: 4239 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i32 [[TMP65]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] 4240 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT30]] 4241 // CHECK11: omp_offload.cont30: 4242 // CHECK11-NEXT: [[TMP95:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 4243 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP95]]) 4244 // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 4245 // CHECK11-NEXT: [[TMP96:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 4246 // CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP96]]) 4247 // CHECK11-NEXT: [[TMP97:%.*]] = load i32, i32* [[RETVAL]], align 4 4248 // CHECK11-NEXT: ret i32 [[TMP97]] 4249 // 4250 // 4251 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 4252 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] { 4253 // CHECK11-NEXT: entry: 4254 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4255 // CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 4256 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 4257 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4258 // CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 4259 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 4260 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 4261 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 4262 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) 4263 // CHECK11-NEXT: ret void 4264 // 4265 // 4266 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined. 4267 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 4268 // CHECK11-NEXT: entry: 4269 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4270 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4271 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4 4272 // CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 4273 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 4274 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4275 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 4276 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 4277 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 4278 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 4279 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4280 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4281 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4282 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4283 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4 4284 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4285 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4286 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4 4287 // CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 4288 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 4289 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4 4290 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 4291 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 4 4292 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 4293 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 4294 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4295 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 4296 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 4297 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 4298 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 4299 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4 4300 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4301 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 4302 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 4303 // CHECK11: omp.precond.then: 4304 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4305 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4306 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 4307 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4308 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4309 // CHECK11-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4310 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 4311 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4312 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4313 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4314 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] 4315 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4316 // CHECK11: cond.true: 4317 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4318 // CHECK11-NEXT: br label [[COND_END:%.*]] 4319 // CHECK11: cond.false: 4320 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4321 // CHECK11-NEXT: br label [[COND_END]] 4322 // CHECK11: cond.end: 4323 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 4324 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4325 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4326 // CHECK11-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 4327 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4328 // CHECK11: omp.inner.for.cond: 4329 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 4330 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10 4331 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 4332 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4333 // CHECK11: omp.inner.for.body: 4334 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 4335 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 4336 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 4337 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !10 4338 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !10 4339 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 [[TMP17]] 4340 // CHECK11-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !10 4341 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4342 // CHECK11: omp.body.continue: 4343 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4344 // CHECK11: omp.inner.for.inc: 4345 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 4346 // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 4347 // CHECK11-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 4348 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] 4349 // CHECK11: omp.inner.for.end: 4350 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4351 // CHECK11: omp.loop.exit: 4352 // CHECK11-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4353 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4 4354 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]]) 4355 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4356 // CHECK11-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 4357 // CHECK11-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4358 // CHECK11: .omp.final.then: 4359 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4360 // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 4361 // CHECK11-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 4362 // CHECK11-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 4363 // CHECK11-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]] 4364 // CHECK11-NEXT: store i32 [[ADD10]], i32* [[I3]], align 4 4365 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 4366 // CHECK11: .omp.final.done: 4367 // CHECK11-NEXT: br label [[OMP_PRECOND_END]] 4368 // CHECK11: omp.precond.end: 4369 // CHECK11-NEXT: ret void 4370 // 4371 // 4372 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105 4373 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 4374 // CHECK11-NEXT: entry: 4375 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4376 // CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 4377 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 4378 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4379 // CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 4380 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 4381 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 4382 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 4383 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) 4384 // CHECK11-NEXT: ret void 4385 // 4386 // 4387 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1 4388 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 4389 // CHECK11-NEXT: entry: 4390 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4391 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4392 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4 4393 // CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 4394 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 4395 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4396 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 4397 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 4398 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 4399 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 4400 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4401 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4402 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4403 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4404 // CHECK11-NEXT: [[I3:%.*]] = alloca i32, align 4 4405 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4406 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4407 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4 4408 // CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 4409 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 4410 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4 4411 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 4412 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 4 4413 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 4414 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 4415 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4416 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 4417 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 4418 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 4419 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 4420 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4 4421 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4422 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 4423 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 4424 // CHECK11: omp.precond.then: 4425 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4426 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4427 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 4428 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4429 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4430 // CHECK11-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4431 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 4432 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4433 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4434 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4435 // CHECK11-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] 4436 // CHECK11-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4437 // CHECK11: cond.true: 4438 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4439 // CHECK11-NEXT: br label [[COND_END:%.*]] 4440 // CHECK11: cond.false: 4441 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4442 // CHECK11-NEXT: br label [[COND_END]] 4443 // CHECK11: cond.end: 4444 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 4445 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4446 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4447 // CHECK11-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 4448 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4449 // CHECK11: omp.inner.for.cond: 4450 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 4451 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !16 4452 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 4453 // CHECK11-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4454 // CHECK11: omp.inner.for.body: 4455 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 4456 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 4457 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 4458 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !16 4459 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !16 4460 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 [[TMP17]] 4461 // CHECK11-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !16 4462 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4463 // CHECK11: omp.body.continue: 4464 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4465 // CHECK11: omp.inner.for.inc: 4466 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 4467 // CHECK11-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 4468 // CHECK11-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 4469 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] 4470 // CHECK11: omp.inner.for.end: 4471 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4472 // CHECK11: omp.loop.exit: 4473 // CHECK11-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4474 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4 4475 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]]) 4476 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4477 // CHECK11-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 4478 // CHECK11-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4479 // CHECK11: .omp.final.then: 4480 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4481 // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 4482 // CHECK11-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 4483 // CHECK11-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 4484 // CHECK11-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]] 4485 // CHECK11-NEXT: store i32 [[ADD10]], i32* [[I3]], align 4 4486 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 4487 // CHECK11: .omp.final.done: 4488 // CHECK11-NEXT: br label [[OMP_PRECOND_END]] 4489 // CHECK11: omp.precond.end: 4490 // CHECK11-NEXT: ret void 4491 // 4492 // 4493 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110 4494 // CHECK11-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 4495 // CHECK11-NEXT: entry: 4496 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 4497 // CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 4498 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 4499 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 4500 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 4501 // CHECK11-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 4502 // CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 4503 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 4504 // CHECK11-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 4505 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 4506 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 4507 // CHECK11-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4 4508 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 4509 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 4510 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 4511 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) 4512 // CHECK11-NEXT: ret void 4513 // 4514 // 4515 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3 4516 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { 4517 // CHECK11-NEXT: entry: 4518 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4519 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4520 // CHECK11-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4 4521 // CHECK11-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 4522 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 4523 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 4524 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4525 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 4526 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 4527 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 4528 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 4529 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4530 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4531 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4532 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4533 // CHECK11-NEXT: [[I4:%.*]] = alloca i32, align 4 4534 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4535 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4536 // CHECK11-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4 4537 // CHECK11-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 4538 // CHECK11-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 4539 // CHECK11-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4540 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4 4541 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 4542 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 4 4543 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 4544 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_1]], align 4 4545 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4546 // CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 4547 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 4548 // CHECK11-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 4549 // CHECK11-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 4550 // CHECK11-NEXT: store i32 0, i32* [[I]], align 4 4551 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4552 // CHECK11-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 4553 // CHECK11-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 4554 // CHECK11: omp.precond.then: 4555 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4556 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 4557 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 4558 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4559 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4560 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 4561 // CHECK11-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4562 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4 4563 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]]) 4564 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 4565 // CHECK11: omp.dispatch.cond: 4566 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4567 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 4568 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]] 4569 // CHECK11-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4570 // CHECK11: cond.true: 4571 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 4572 // CHECK11-NEXT: br label [[COND_END:%.*]] 4573 // CHECK11: cond.false: 4574 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4575 // CHECK11-NEXT: br label [[COND_END]] 4576 // CHECK11: cond.end: 4577 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ] 4578 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4579 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4580 // CHECK11-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4 4581 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 4582 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4583 // CHECK11-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] 4584 // CHECK11-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 4585 // CHECK11: omp.dispatch.body: 4586 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4587 // CHECK11: omp.inner.for.cond: 4588 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 4589 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19 4590 // CHECK11-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 4591 // CHECK11-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4592 // CHECK11: omp.inner.for.body: 4593 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 4594 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1 4595 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 4596 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !19 4597 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !19 4598 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 [[TMP20]] 4599 // CHECK11-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !19 4600 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4601 // CHECK11: omp.body.continue: 4602 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4603 // CHECK11: omp.inner.for.inc: 4604 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 4605 // CHECK11-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP21]], 1 4606 // CHECK11-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 4607 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] 4608 // CHECK11: omp.inner.for.end: 4609 // CHECK11-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 4610 // CHECK11: omp.dispatch.inc: 4611 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4612 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 4613 // CHECK11-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP23]] 4614 // CHECK11-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_LB]], align 4 4615 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4616 // CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 4617 // CHECK11-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]] 4618 // CHECK11-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_UB]], align 4 4619 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND]] 4620 // CHECK11: omp.dispatch.end: 4621 // CHECK11-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4622 // CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4 4623 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]]) 4624 // CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4625 // CHECK11-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 4626 // CHECK11-NEXT: br i1 [[TMP29]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4627 // CHECK11: .omp.final.then: 4628 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 4629 // CHECK11-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP30]], 0 4630 // CHECK11-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 4631 // CHECK11-NEXT: [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1 4632 // CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 4633 // CHECK11-NEXT: store i32 [[ADD14]], i32* [[I4]], align 4 4634 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 4635 // CHECK11: .omp.final.done: 4636 // CHECK11-NEXT: br label [[OMP_PRECOND_END]] 4637 // CHECK11: omp.precond.end: 4638 // CHECK11-NEXT: ret void 4639 // 4640 // 4641 // CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_ 4642 // CHECK11-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat { 4643 // CHECK11-NEXT: entry: 4644 // CHECK11-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 4645 // CHECK11-NEXT: [[A:%.*]] = alloca [10 x i32], align 4 4646 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4 4647 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4 4648 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4 4649 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 4650 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 4 4651 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 4 4652 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 4 4653 // CHECK11-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 4654 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 4 4655 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 4 4656 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 4 4657 // CHECK11-NEXT: [[_TMP10:%.*]] = alloca i32, align 4 4658 // CHECK11-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 4659 // CHECK11-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4660 // CHECK11-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]** 4661 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 4 4662 // CHECK11-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4663 // CHECK11-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]** 4664 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 4 4665 // CHECK11-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 4666 // CHECK11-NEXT: store i8* null, i8** [[TMP4]], align 4 4667 // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4668 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4669 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 4670 // CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 4671 // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 4672 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4673 // CHECK11: omp_offload.failed: 4674 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79([10 x i32]* [[A]]) #[[ATTR3]] 4675 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] 4676 // CHECK11: omp_offload.cont: 4677 // CHECK11-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 4678 // CHECK11-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]** 4679 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 4 4680 // CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 4681 // CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]** 4682 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 4 4683 // CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0 4684 // CHECK11-NEXT: store i8* null, i8** [[TMP13]], align 4 4685 // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 4686 // CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 4687 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 4688 // CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 4689 // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 4690 // CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 4691 // CHECK11: omp_offload.failed5: 4692 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84([10 x i32]* [[A]]) #[[ATTR3]] 4693 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT6]] 4694 // CHECK11: omp_offload.cont6: 4695 // CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 4696 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to [10 x i32]** 4697 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP19]], align 4 4698 // CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 4699 // CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [10 x i32]** 4700 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP21]], align 4 4701 // CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0 4702 // CHECK11-NEXT: store i8* null, i8** [[TMP22]], align 4 4703 // CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 4704 // CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 4705 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 4706 // CHECK11-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 4707 // CHECK11-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 4708 // CHECK11-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] 4709 // CHECK11: omp_offload.failed11: 4710 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89([10 x i32]* [[A]]) #[[ATTR3]] 4711 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT12]] 4712 // CHECK11: omp_offload.cont12: 4713 // CHECK11-NEXT: ret i32 0 4714 // 4715 // 4716 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79 4717 // CHECK11-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 4718 // CHECK11-NEXT: entry: 4719 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 4720 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 4721 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 4722 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 4723 // CHECK11-NEXT: ret void 4724 // 4725 // 4726 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5 4727 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 4728 // CHECK11-NEXT: entry: 4729 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4730 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4731 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 4732 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4733 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 4734 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4735 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4736 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4737 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4738 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 4739 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4740 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4741 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 4742 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 4743 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4744 // CHECK11-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 4745 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4746 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4747 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4748 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 4749 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4750 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4751 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 4752 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4753 // CHECK11: cond.true: 4754 // CHECK11-NEXT: br label [[COND_END:%.*]] 4755 // CHECK11: cond.false: 4756 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4757 // CHECK11-NEXT: br label [[COND_END]] 4758 // CHECK11: cond.end: 4759 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 4760 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4761 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4762 // CHECK11-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 4763 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4764 // CHECK11: omp.inner.for.cond: 4765 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 4766 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22 4767 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 4768 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4769 // CHECK11: omp.inner.for.body: 4770 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 4771 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 4772 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 4773 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22 4774 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !22 4775 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]] 4776 // CHECK11-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22 4777 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4778 // CHECK11: omp.body.continue: 4779 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4780 // CHECK11: omp.inner.for.inc: 4781 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 4782 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 4783 // CHECK11-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 4784 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] 4785 // CHECK11: omp.inner.for.end: 4786 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4787 // CHECK11: omp.loop.exit: 4788 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 4789 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4790 // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 4791 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4792 // CHECK11: .omp.final.then: 4793 // CHECK11-NEXT: store i32 10, i32* [[I]], align 4 4794 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 4795 // CHECK11: .omp.final.done: 4796 // CHECK11-NEXT: ret void 4797 // 4798 // 4799 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84 4800 // CHECK11-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 4801 // CHECK11-NEXT: entry: 4802 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 4803 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 4804 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 4805 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 4806 // CHECK11-NEXT: ret void 4807 // 4808 // 4809 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 4810 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 4811 // CHECK11-NEXT: entry: 4812 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4813 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4814 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 4815 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4816 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 4817 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4818 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4819 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4820 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4821 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 4822 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4823 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4824 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 4825 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 4826 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4827 // CHECK11-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 4828 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4829 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4830 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4831 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 4832 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4833 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4834 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 4835 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4836 // CHECK11: cond.true: 4837 // CHECK11-NEXT: br label [[COND_END:%.*]] 4838 // CHECK11: cond.false: 4839 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4840 // CHECK11-NEXT: br label [[COND_END]] 4841 // CHECK11: cond.end: 4842 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 4843 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4844 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4845 // CHECK11-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 4846 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4847 // CHECK11: omp.inner.for.cond: 4848 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 4849 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25 4850 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 4851 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4852 // CHECK11: omp.inner.for.body: 4853 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 4854 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 4855 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 4856 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25 4857 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !25 4858 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]] 4859 // CHECK11-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25 4860 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4861 // CHECK11: omp.body.continue: 4862 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4863 // CHECK11: omp.inner.for.inc: 4864 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 4865 // CHECK11-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 4866 // CHECK11-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 4867 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] 4868 // CHECK11: omp.inner.for.end: 4869 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4870 // CHECK11: omp.loop.exit: 4871 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 4872 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4873 // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 4874 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4875 // CHECK11: .omp.final.then: 4876 // CHECK11-NEXT: store i32 10, i32* [[I]], align 4 4877 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 4878 // CHECK11: .omp.final.done: 4879 // CHECK11-NEXT: ret void 4880 // 4881 // 4882 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89 4883 // CHECK11-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 4884 // CHECK11-NEXT: entry: 4885 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 4886 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 4887 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 4888 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 4889 // CHECK11-NEXT: ret void 4890 // 4891 // 4892 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 4893 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 4894 // CHECK11-NEXT: entry: 4895 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 4896 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 4897 // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 4898 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4899 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 4900 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4901 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4902 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4903 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4904 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 4905 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 4906 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 4907 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 4908 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 4909 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4910 // CHECK11-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 4911 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4912 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4913 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 4914 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 4915 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 10) 4916 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 4917 // CHECK11: omp.dispatch.cond: 4918 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4919 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 4920 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4921 // CHECK11: cond.true: 4922 // CHECK11-NEXT: br label [[COND_END:%.*]] 4923 // CHECK11: cond.false: 4924 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4925 // CHECK11-NEXT: br label [[COND_END]] 4926 // CHECK11: cond.end: 4927 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 4928 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4929 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4930 // CHECK11-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 4931 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 4932 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4933 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 4934 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 4935 // CHECK11: omp.dispatch.body: 4936 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4937 // CHECK11: omp.inner.for.cond: 4938 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 4939 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28 4940 // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 4941 // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4942 // CHECK11: omp.inner.for.body: 4943 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 4944 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 4945 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 4946 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !28 4947 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !28 4948 // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]] 4949 // CHECK11-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !28 4950 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4951 // CHECK11: omp.body.continue: 4952 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4953 // CHECK11: omp.inner.for.inc: 4954 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 4955 // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 4956 // CHECK11-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 4957 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] 4958 // CHECK11: omp.inner.for.end: 4959 // CHECK11-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 4960 // CHECK11: omp.dispatch.inc: 4961 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4962 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 4963 // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 4964 // CHECK11-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4 4965 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4966 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 4967 // CHECK11-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] 4968 // CHECK11-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4 4969 // CHECK11-NEXT: br label [[OMP_DISPATCH_COND]] 4970 // CHECK11: omp.dispatch.end: 4971 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 4972 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4973 // CHECK11-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 4974 // CHECK11-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4975 // CHECK11: .omp.final.then: 4976 // CHECK11-NEXT: store i32 10, i32* [[I]], align 4 4977 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 4978 // CHECK11: .omp.final.done: 4979 // CHECK11-NEXT: ret void 4980 // 4981 // 4982 // CHECK11-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 4983 // CHECK11-SAME: () #[[ATTR5:[0-9]+]] { 4984 // CHECK11-NEXT: entry: 4985 // CHECK11-NEXT: call void @__tgt_register_requires(i64 1) 4986 // CHECK11-NEXT: ret void 4987 // 4988 // 4989 // CHECK12-LABEL: define {{[^@]+}}@main 4990 // CHECK12-SAME: (i32 noundef [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] { 4991 // CHECK12-NEXT: entry: 4992 // CHECK12-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 4993 // CHECK12-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 4994 // CHECK12-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 4 4995 // CHECK12-NEXT: [[N:%.*]] = alloca i32, align 4 4996 // CHECK12-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 4997 // CHECK12-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 4998 // CHECK12-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4 4999 // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 5000 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 5001 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 5002 // CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 5003 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 5004 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5005 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 5006 // CHECK12-NEXT: [[N_CASTED3:%.*]] = alloca i32, align 4 5007 // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 5008 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 5009 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 5010 // CHECK12-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 5011 // CHECK12-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 5012 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 5013 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 5014 // CHECK12-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 5015 // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [3 x i8*], align 4 5016 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [3 x i8*], align 4 5017 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [3 x i8*], align 4 5018 // CHECK12-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [3 x i64], align 4 5019 // CHECK12-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 5020 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 5021 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 5022 // CHECK12-NEXT: store i32 0, i32* [[RETVAL]], align 4 5023 // CHECK12-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 5024 // CHECK12-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 5025 // CHECK12-NEXT: store i32 100, i32* [[N]], align 4 5026 // CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4 5027 // CHECK12-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() 5028 // CHECK12-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4 5029 // CHECK12-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 5030 // CHECK12-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4 5031 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[N]], align 4 5032 // CHECK12-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4 5033 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4 5034 // CHECK12-NEXT: [[TMP4:%.*]] = mul nuw i32 [[TMP0]], 4 5035 // CHECK12-NEXT: [[TMP5:%.*]] = sext i32 [[TMP4]] to i64 5036 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5037 // CHECK12-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32* 5038 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP7]], align 4 5039 // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5040 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* 5041 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 5042 // CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 5043 // CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 5044 // CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 5045 // CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 5046 // CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 5047 // CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 5048 // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 5049 // CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 5050 // CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 5051 // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 5052 // CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 5053 // CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 5054 // CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 5055 // CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 5056 // CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 5057 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** 5058 // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 5059 // CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 5060 // CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** 5061 // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 5062 // CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 5063 // CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 5064 // CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 5065 // CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 5066 // CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5067 // CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5068 // CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 5069 // CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 5070 // CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 5071 // CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5072 // CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 5073 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 5074 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 5075 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 5076 // CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5077 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 5078 // CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 5079 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) 5080 // CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 5081 // CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 5082 // CHECK12-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 5083 // CHECK12: omp_offload.failed: 5084 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] 5085 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] 5086 // CHECK12: omp_offload.cont: 5087 // CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 5088 // CHECK12-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 5089 // CHECK12-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 5090 // CHECK12-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 5091 // CHECK12-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 5092 // CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 5093 // CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* 5094 // CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 5095 // CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 5096 // CHECK12-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* 5097 // CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 5098 // CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 5099 // CHECK12-NEXT: store i64 4, i64* [[TMP41]], align 4 5100 // CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 5101 // CHECK12-NEXT: store i8* null, i8** [[TMP42]], align 4 5102 // CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 5103 // CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* 5104 // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 5105 // CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 5106 // CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* 5107 // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 5108 // CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 5109 // CHECK12-NEXT: store i64 4, i64* [[TMP47]], align 4 5110 // CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 5111 // CHECK12-NEXT: store i8* null, i8** [[TMP48]], align 4 5112 // CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 5113 // CHECK12-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** 5114 // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 5115 // CHECK12-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 5116 // CHECK12-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** 5117 // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 5118 // CHECK12-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 5119 // CHECK12-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 5120 // CHECK12-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 5121 // CHECK12-NEXT: store i8* null, i8** [[TMP54]], align 4 5122 // CHECK12-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 5123 // CHECK12-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 5124 // CHECK12-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 5125 // CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 5126 // CHECK12-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 5127 // CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 5128 // CHECK12-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 5129 // CHECK12-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 5130 // CHECK12-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 5131 // CHECK12-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 5132 // CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 5133 // CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 5134 // CHECK12-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 5135 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP61]]) 5136 // CHECK12-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 5137 // CHECK12-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 5138 // CHECK12-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] 5139 // CHECK12: omp_offload.failed15: 5140 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] 5141 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT16]] 5142 // CHECK12: omp_offload.cont16: 5143 // CHECK12-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 5144 // CHECK12-NEXT: store i32 [[TMP64]], i32* [[N_CASTED17]], align 4 5145 // CHECK12-NEXT: [[TMP65:%.*]] = load i32, i32* [[N_CASTED17]], align 4 5146 // CHECK12-NEXT: [[TMP66:%.*]] = mul nuw i32 [[TMP0]], 4 5147 // CHECK12-NEXT: [[TMP67:%.*]] = sext i32 [[TMP66]] to i64 5148 // CHECK12-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 5149 // CHECK12-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* 5150 // CHECK12-NEXT: store i32 [[TMP65]], i32* [[TMP69]], align 4 5151 // CHECK12-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 5152 // CHECK12-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* 5153 // CHECK12-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 5154 // CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 5155 // CHECK12-NEXT: store i64 4, i64* [[TMP72]], align 4 5156 // CHECK12-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 5157 // CHECK12-NEXT: store i8* null, i8** [[TMP73]], align 4 5158 // CHECK12-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 5159 // CHECK12-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* 5160 // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 5161 // CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 5162 // CHECK12-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* 5163 // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP77]], align 4 5164 // CHECK12-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 5165 // CHECK12-NEXT: store i64 4, i64* [[TMP78]], align 4 5166 // CHECK12-NEXT: [[TMP79:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 5167 // CHECK12-NEXT: store i8* null, i8** [[TMP79]], align 4 5168 // CHECK12-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 5169 // CHECK12-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** 5170 // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 4 5171 // CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 5172 // CHECK12-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32** 5173 // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP83]], align 4 5174 // CHECK12-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 5175 // CHECK12-NEXT: store i64 [[TMP67]], i64* [[TMP84]], align 4 5176 // CHECK12-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 5177 // CHECK12-NEXT: store i8* null, i8** [[TMP85]], align 4 5178 // CHECK12-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 5179 // CHECK12-NEXT: [[TMP87:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 5180 // CHECK12-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 5181 // CHECK12-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 5182 // CHECK12-NEXT: store i32 [[TMP89]], i32* [[DOTCAPTURE_EXPR_23]], align 4 5183 // CHECK12-NEXT: [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 5184 // CHECK12-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP90]], 0 5185 // CHECK12-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 5186 // CHECK12-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 5187 // CHECK12-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 5188 // CHECK12-NEXT: [[TMP91:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 5189 // CHECK12-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP91]], 1 5190 // CHECK12-NEXT: [[TMP92:%.*]] = zext i32 [[ADD28]] to i64 5191 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP92]]) 5192 // CHECK12-NEXT: [[TMP93:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP86]], i8** [[TMP87]], i64* [[TMP88]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 5193 // CHECK12-NEXT: [[TMP94:%.*]] = icmp ne i32 [[TMP93]], 0 5194 // CHECK12-NEXT: br i1 [[TMP94]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] 5195 // CHECK12: omp_offload.failed29: 5196 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i32 [[TMP65]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] 5197 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT30]] 5198 // CHECK12: omp_offload.cont30: 5199 // CHECK12-NEXT: [[TMP95:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 5200 // CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP95]]) 5201 // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 5202 // CHECK12-NEXT: [[TMP96:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 5203 // CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP96]]) 5204 // CHECK12-NEXT: [[TMP97:%.*]] = load i32, i32* [[RETVAL]], align 4 5205 // CHECK12-NEXT: ret i32 [[TMP97]] 5206 // 5207 // 5208 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 5209 // CHECK12-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2:[0-9]+]] { 5210 // CHECK12-NEXT: entry: 5211 // CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5212 // CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 5213 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 5214 // CHECK12-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5215 // CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 5216 // CHECK12-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 5217 // CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 5218 // CHECK12-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 5219 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) 5220 // CHECK12-NEXT: ret void 5221 // 5222 // 5223 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined. 5224 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 5225 // CHECK12-NEXT: entry: 5226 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5227 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5228 // CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4 5229 // CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 5230 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 5231 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5232 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 5233 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5234 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 5235 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 5236 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5237 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5238 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5239 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5240 // CHECK12-NEXT: [[I3:%.*]] = alloca i32, align 4 5241 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5242 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5243 // CHECK12-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4 5244 // CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 5245 // CHECK12-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 5246 // CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4 5247 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 5248 // CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 4 5249 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 5250 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 5251 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5252 // CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 5253 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 5254 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 5255 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 5256 // CHECK12-NEXT: store i32 0, i32* [[I]], align 4 5257 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5258 // CHECK12-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 5259 // CHECK12-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 5260 // CHECK12: omp.precond.then: 5261 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5262 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5263 // CHECK12-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 5264 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5265 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5266 // CHECK12-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 5267 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 5268 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP8]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5269 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5270 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5271 // CHECK12-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] 5272 // CHECK12-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5273 // CHECK12: cond.true: 5274 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5275 // CHECK12-NEXT: br label [[COND_END:%.*]] 5276 // CHECK12: cond.false: 5277 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5278 // CHECK12-NEXT: br label [[COND_END]] 5279 // CHECK12: cond.end: 5280 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 5281 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 5282 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5283 // CHECK12-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 5284 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5285 // CHECK12: omp.inner.for.cond: 5286 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 5287 // CHECK12-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !10 5288 // CHECK12-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 5289 // CHECK12-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5290 // CHECK12: omp.inner.for.body: 5291 // CHECK12-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 5292 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 5293 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 5294 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !10 5295 // CHECK12-NEXT: [[TMP17:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !10 5296 // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 [[TMP17]] 5297 // CHECK12-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !10 5298 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 5299 // CHECK12: omp.body.continue: 5300 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5301 // CHECK12: omp.inner.for.inc: 5302 // CHECK12-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 5303 // CHECK12-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 5304 // CHECK12-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !10 5305 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP11:![0-9]+]] 5306 // CHECK12: omp.inner.for.end: 5307 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5308 // CHECK12: omp.loop.exit: 5309 // CHECK12-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 5310 // CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4 5311 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]]) 5312 // CHECK12-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5313 // CHECK12-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 5314 // CHECK12-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5315 // CHECK12: .omp.final.then: 5316 // CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5317 // CHECK12-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 5318 // CHECK12-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 5319 // CHECK12-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 5320 // CHECK12-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]] 5321 // CHECK12-NEXT: store i32 [[ADD10]], i32* [[I3]], align 4 5322 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 5323 // CHECK12: .omp.final.done: 5324 // CHECK12-NEXT: br label [[OMP_PRECOND_END]] 5325 // CHECK12: omp.precond.end: 5326 // CHECK12-NEXT: ret void 5327 // 5328 // 5329 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105 5330 // CHECK12-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 5331 // CHECK12-NEXT: entry: 5332 // CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5333 // CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 5334 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 5335 // CHECK12-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5336 // CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 5337 // CHECK12-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 5338 // CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 5339 // CHECK12-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 5340 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) 5341 // CHECK12-NEXT: ret void 5342 // 5343 // 5344 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..1 5345 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 5346 // CHECK12-NEXT: entry: 5347 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5348 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5349 // CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4 5350 // CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 5351 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 5352 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5353 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 5354 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5355 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 5356 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 5357 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5358 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5359 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5360 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5361 // CHECK12-NEXT: [[I3:%.*]] = alloca i32, align 4 5362 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5363 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5364 // CHECK12-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4 5365 // CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 5366 // CHECK12-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 5367 // CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4 5368 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 5369 // CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 4 5370 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 5371 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 5372 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5373 // CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 5374 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 5375 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 5376 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 5377 // CHECK12-NEXT: store i32 0, i32* [[I]], align 4 5378 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5379 // CHECK12-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 5380 // CHECK12-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 5381 // CHECK12: omp.precond.then: 5382 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5383 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5384 // CHECK12-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 5385 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5386 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5387 // CHECK12-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 5388 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 5389 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP8]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5390 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5391 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5392 // CHECK12-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]] 5393 // CHECK12-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5394 // CHECK12: cond.true: 5395 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5396 // CHECK12-NEXT: br label [[COND_END:%.*]] 5397 // CHECK12: cond.false: 5398 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5399 // CHECK12-NEXT: br label [[COND_END]] 5400 // CHECK12: cond.end: 5401 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ] 5402 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 5403 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5404 // CHECK12-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_IV]], align 4 5405 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5406 // CHECK12: omp.inner.for.cond: 5407 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 5408 // CHECK12-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !16 5409 // CHECK12-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]] 5410 // CHECK12-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5411 // CHECK12: omp.inner.for.body: 5412 // CHECK12-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 5413 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1 5414 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 5415 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !16 5416 // CHECK12-NEXT: [[TMP17:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !16 5417 // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 [[TMP17]] 5418 // CHECK12-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !16 5419 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 5420 // CHECK12: omp.body.continue: 5421 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5422 // CHECK12: omp.inner.for.inc: 5423 // CHECK12-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 5424 // CHECK12-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1 5425 // CHECK12-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !16 5426 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP17:![0-9]+]] 5427 // CHECK12: omp.inner.for.end: 5428 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5429 // CHECK12: omp.loop.exit: 5430 // CHECK12-NEXT: [[TMP19:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 5431 // CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP19]], align 4 5432 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP20]]) 5433 // CHECK12-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5434 // CHECK12-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 5435 // CHECK12-NEXT: br i1 [[TMP22]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5436 // CHECK12: .omp.final.then: 5437 // CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5438 // CHECK12-NEXT: [[SUB7:%.*]] = sub nsw i32 [[TMP23]], 0 5439 // CHECK12-NEXT: [[DIV8:%.*]] = sdiv i32 [[SUB7]], 1 5440 // CHECK12-NEXT: [[MUL9:%.*]] = mul nsw i32 [[DIV8]], 1 5441 // CHECK12-NEXT: [[ADD10:%.*]] = add nsw i32 0, [[MUL9]] 5442 // CHECK12-NEXT: store i32 [[ADD10]], i32* [[I3]], align 4 5443 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 5444 // CHECK12: .omp.final.done: 5445 // CHECK12-NEXT: br label [[OMP_PRECOND_END]] 5446 // CHECK12: omp.precond.end: 5447 // CHECK12-NEXT: ret void 5448 // 5449 // 5450 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110 5451 // CHECK12-SAME: (i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { 5452 // CHECK12-NEXT: entry: 5453 // CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4 5454 // CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 5455 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 5456 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5457 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 5458 // CHECK12-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 5459 // CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 5460 // CHECK12-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 5461 // CHECK12-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 5462 // CHECK12-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 5463 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 5464 // CHECK12-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4 5465 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5466 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 5467 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 5468 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) 5469 // CHECK12-NEXT: ret void 5470 // 5471 // 5472 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3 5473 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { 5474 // CHECK12-NEXT: entry: 5475 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5476 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5477 // CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32*, align 4 5478 // CHECK12-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 5479 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca i32*, align 4 5480 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4 5481 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5482 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 5483 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 5484 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 5485 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 5486 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5487 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5488 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5489 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5490 // CHECK12-NEXT: [[I4:%.*]] = alloca i32, align 4 5491 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5492 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5493 // CHECK12-NEXT: store i32* [[N]], i32** [[N_ADDR]], align 4 5494 // CHECK12-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 5495 // CHECK12-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 5496 // CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_]], i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 5497 // CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[N_ADDR]], align 4 5498 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 5499 // CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[A_ADDR]], align 4 5500 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP0]], align 4 5501 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_1]], align 4 5502 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5503 // CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 5504 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 5505 // CHECK12-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 5506 // CHECK12-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 5507 // CHECK12-NEXT: store i32 0, i32* [[I]], align 4 5508 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5509 // CHECK12-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 5510 // CHECK12-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 5511 // CHECK12: omp.precond.then: 5512 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5513 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 5514 // CHECK12-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_UB]], align 4 5515 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5516 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5517 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 5518 // CHECK12-NEXT: [[TMP8:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 5519 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4 5520 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP9]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 [[TMP7]]) 5521 // CHECK12-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 5522 // CHECK12: omp.dispatch.cond: 5523 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5524 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 5525 // CHECK12-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]] 5526 // CHECK12-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5527 // CHECK12: cond.true: 5528 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 5529 // CHECK12-NEXT: br label [[COND_END:%.*]] 5530 // CHECK12: cond.false: 5531 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5532 // CHECK12-NEXT: br label [[COND_END]] 5533 // CHECK12: cond.end: 5534 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ] 5535 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 5536 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5537 // CHECK12-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_IV]], align 4 5538 // CHECK12-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5539 // CHECK12-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5540 // CHECK12-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] 5541 // CHECK12-NEXT: br i1 [[CMP6]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 5542 // CHECK12: omp.dispatch.body: 5543 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5544 // CHECK12: omp.inner.for.cond: 5545 // CHECK12-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 5546 // CHECK12-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19 5547 // CHECK12-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 5548 // CHECK12-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5549 // CHECK12: omp.inner.for.body: 5550 // CHECK12-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 5551 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP19]], 1 5552 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 5553 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I4]], align 4, !llvm.access.group !19 5554 // CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[I4]], align 4, !llvm.access.group !19 5555 // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i32 [[TMP20]] 5556 // CHECK12-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !19 5557 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 5558 // CHECK12: omp.body.continue: 5559 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5560 // CHECK12: omp.inner.for.inc: 5561 // CHECK12-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 5562 // CHECK12-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP21]], 1 5563 // CHECK12-NEXT: store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 5564 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] 5565 // CHECK12: omp.inner.for.end: 5566 // CHECK12-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 5567 // CHECK12: omp.dispatch.inc: 5568 // CHECK12-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5569 // CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 5570 // CHECK12-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP23]] 5571 // CHECK12-NEXT: store i32 [[ADD9]], i32* [[DOTOMP_LB]], align 4 5572 // CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5573 // CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 5574 // CHECK12-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP24]], [[TMP25]] 5575 // CHECK12-NEXT: store i32 [[ADD10]], i32* [[DOTOMP_UB]], align 4 5576 // CHECK12-NEXT: br label [[OMP_DISPATCH_COND]] 5577 // CHECK12: omp.dispatch.end: 5578 // CHECK12-NEXT: [[TMP26:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 5579 // CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4 5580 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP27]]) 5581 // CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5582 // CHECK12-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 5583 // CHECK12-NEXT: br i1 [[TMP29]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5584 // CHECK12: .omp.final.then: 5585 // CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5586 // CHECK12-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP30]], 0 5587 // CHECK12-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 5588 // CHECK12-NEXT: [[MUL13:%.*]] = mul nsw i32 [[DIV12]], 1 5589 // CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 5590 // CHECK12-NEXT: store i32 [[ADD14]], i32* [[I4]], align 4 5591 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 5592 // CHECK12: .omp.final.done: 5593 // CHECK12-NEXT: br label [[OMP_PRECOND_END]] 5594 // CHECK12: omp.precond.end: 5595 // CHECK12-NEXT: ret void 5596 // 5597 // 5598 // CHECK12-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_ 5599 // CHECK12-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR4:[0-9]+]] comdat { 5600 // CHECK12-NEXT: entry: 5601 // CHECK12-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 5602 // CHECK12-NEXT: [[A:%.*]] = alloca [10 x i32], align 4 5603 // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4 5604 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4 5605 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4 5606 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 5607 // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 4 5608 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 4 5609 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 4 5610 // CHECK12-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 5611 // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [1 x i8*], align 4 5612 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [1 x i8*], align 4 5613 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [1 x i8*], align 4 5614 // CHECK12-NEXT: [[_TMP10:%.*]] = alloca i32, align 4 5615 // CHECK12-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 5616 // CHECK12-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5617 // CHECK12-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [10 x i32]** 5618 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP1]], align 4 5619 // CHECK12-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5620 // CHECK12-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [10 x i32]** 5621 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP3]], align 4 5622 // CHECK12-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 5623 // CHECK12-NEXT: store i8* null, i8** [[TMP4]], align 4 5624 // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5625 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5626 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 5627 // CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 5628 // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 5629 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 5630 // CHECK12: omp_offload.failed: 5631 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79([10 x i32]* [[A]]) #[[ATTR3]] 5632 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] 5633 // CHECK12: omp_offload.cont: 5634 // CHECK12-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 5635 // CHECK12-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x i32]** 5636 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP10]], align 4 5637 // CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 5638 // CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x i32]** 5639 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP12]], align 4 5640 // CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0 5641 // CHECK12-NEXT: store i8* null, i8** [[TMP13]], align 4 5642 // CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 5643 // CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 5644 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 5645 // CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 5646 // CHECK12-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 5647 // CHECK12-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 5648 // CHECK12: omp_offload.failed5: 5649 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84([10 x i32]* [[A]]) #[[ATTR3]] 5650 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT6]] 5651 // CHECK12: omp_offload.cont6: 5652 // CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 5653 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to [10 x i32]** 5654 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP19]], align 4 5655 // CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 5656 // CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [10 x i32]** 5657 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP21]], align 4 5658 // CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0 5659 // CHECK12-NEXT: store i8* null, i8** [[TMP22]], align 4 5660 // CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 5661 // CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 5662 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) 5663 // CHECK12-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 5664 // CHECK12-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 5665 // CHECK12-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] 5666 // CHECK12: omp_offload.failed11: 5667 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89([10 x i32]* [[A]]) #[[ATTR3]] 5668 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT12]] 5669 // CHECK12: omp_offload.cont12: 5670 // CHECK12-NEXT: ret i32 0 5671 // 5672 // 5673 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79 5674 // CHECK12-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 5675 // CHECK12-NEXT: entry: 5676 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 5677 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 5678 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 5679 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 5680 // CHECK12-NEXT: ret void 5681 // 5682 // 5683 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5 5684 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 5685 // CHECK12-NEXT: entry: 5686 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5687 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5688 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 5689 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5690 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 5691 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5692 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5693 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5694 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5695 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 5696 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5697 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5698 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 5699 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 5700 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5701 // CHECK12-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 5702 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5703 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5704 // CHECK12-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 5705 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 5706 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5707 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5708 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 5709 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5710 // CHECK12: cond.true: 5711 // CHECK12-NEXT: br label [[COND_END:%.*]] 5712 // CHECK12: cond.false: 5713 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5714 // CHECK12-NEXT: br label [[COND_END]] 5715 // CHECK12: cond.end: 5716 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 5717 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 5718 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5719 // CHECK12-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 5720 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5721 // CHECK12: omp.inner.for.cond: 5722 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 5723 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22 5724 // CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 5725 // CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5726 // CHECK12: omp.inner.for.body: 5727 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 5728 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 5729 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 5730 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22 5731 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !22 5732 // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]] 5733 // CHECK12-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !22 5734 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 5735 // CHECK12: omp.body.continue: 5736 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5737 // CHECK12: omp.inner.for.inc: 5738 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 5739 // CHECK12-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 5740 // CHECK12-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 5741 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] 5742 // CHECK12: omp.inner.for.end: 5743 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5744 // CHECK12: omp.loop.exit: 5745 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 5746 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5747 // CHECK12-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 5748 // CHECK12-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5749 // CHECK12: .omp.final.then: 5750 // CHECK12-NEXT: store i32 10, i32* [[I]], align 4 5751 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 5752 // CHECK12: .omp.final.done: 5753 // CHECK12-NEXT: ret void 5754 // 5755 // 5756 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84 5757 // CHECK12-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 5758 // CHECK12-NEXT: entry: 5759 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 5760 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 5761 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 5762 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 5763 // CHECK12-NEXT: ret void 5764 // 5765 // 5766 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 5767 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 5768 // CHECK12-NEXT: entry: 5769 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5770 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5771 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 5772 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5773 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 5774 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5775 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5776 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5777 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5778 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 5779 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5780 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5781 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 5782 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 5783 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5784 // CHECK12-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 5785 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5786 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5787 // CHECK12-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 5788 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 5789 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5790 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5791 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 5792 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5793 // CHECK12: cond.true: 5794 // CHECK12-NEXT: br label [[COND_END:%.*]] 5795 // CHECK12: cond.false: 5796 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5797 // CHECK12-NEXT: br label [[COND_END]] 5798 // CHECK12: cond.end: 5799 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 5800 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 5801 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5802 // CHECK12-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 5803 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5804 // CHECK12: omp.inner.for.cond: 5805 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 5806 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !25 5807 // CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 5808 // CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5809 // CHECK12: omp.inner.for.body: 5810 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 5811 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1 5812 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 5813 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !25 5814 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !25 5815 // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP9]] 5816 // CHECK12-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !25 5817 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 5818 // CHECK12: omp.body.continue: 5819 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5820 // CHECK12: omp.inner.for.inc: 5821 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 5822 // CHECK12-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], 1 5823 // CHECK12-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 5824 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]] 5825 // CHECK12: omp.inner.for.end: 5826 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5827 // CHECK12: omp.loop.exit: 5828 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 5829 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5830 // CHECK12-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 5831 // CHECK12-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5832 // CHECK12: .omp.final.then: 5833 // CHECK12-NEXT: store i32 10, i32* [[I]], align 4 5834 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 5835 // CHECK12: .omp.final.done: 5836 // CHECK12-NEXT: ret void 5837 // 5838 // 5839 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89 5840 // CHECK12-SAME: ([10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 5841 // CHECK12-NEXT: entry: 5842 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 5843 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 5844 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 5845 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) 5846 // CHECK12-NEXT: ret void 5847 // 5848 // 5849 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 5850 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { 5851 // CHECK12-NEXT: entry: 5852 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 5853 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 5854 // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 5855 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5856 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 5857 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5858 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5859 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5860 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5861 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 5862 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 5863 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 5864 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 5865 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 5866 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5867 // CHECK12-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 5868 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5869 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5870 // CHECK12-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 5871 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 5872 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 91, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 10) 5873 // CHECK12-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 5874 // CHECK12: omp.dispatch.cond: 5875 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5876 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9 5877 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5878 // CHECK12: cond.true: 5879 // CHECK12-NEXT: br label [[COND_END:%.*]] 5880 // CHECK12: cond.false: 5881 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5882 // CHECK12-NEXT: br label [[COND_END]] 5883 // CHECK12: cond.end: 5884 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 5885 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 5886 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5887 // CHECK12-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 5888 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5889 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5890 // CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 5891 // CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 5892 // CHECK12: omp.dispatch.body: 5893 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5894 // CHECK12: omp.inner.for.cond: 5895 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 5896 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !28 5897 // CHECK12-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 5898 // CHECK12-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5899 // CHECK12: omp.inner.for.body: 5900 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 5901 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 5902 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 5903 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !28 5904 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !28 5905 // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 [[TMP11]] 5906 // CHECK12-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !28 5907 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 5908 // CHECK12: omp.body.continue: 5909 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5910 // CHECK12: omp.inner.for.inc: 5911 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 5912 // CHECK12-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1 5913 // CHECK12-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 5914 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]] 5915 // CHECK12: omp.inner.for.end: 5916 // CHECK12-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 5917 // CHECK12: omp.dispatch.inc: 5918 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5919 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 5920 // CHECK12-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 5921 // CHECK12-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_LB]], align 4 5922 // CHECK12-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5923 // CHECK12-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 5924 // CHECK12-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]] 5925 // CHECK12-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_UB]], align 4 5926 // CHECK12-NEXT: br label [[OMP_DISPATCH_COND]] 5927 // CHECK12: omp.dispatch.end: 5928 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 5929 // CHECK12-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5930 // CHECK12-NEXT: [[TMP18:%.*]] = icmp ne i32 [[TMP17]], 0 5931 // CHECK12-NEXT: br i1 [[TMP18]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5932 // CHECK12: .omp.final.then: 5933 // CHECK12-NEXT: store i32 10, i32* [[I]], align 4 5934 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 5935 // CHECK12: .omp.final.done: 5936 // CHECK12-NEXT: ret void 5937 // 5938 // 5939 // CHECK12-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 5940 // CHECK12-SAME: () #[[ATTR5:[0-9]+]] { 5941 // CHECK12-NEXT: entry: 5942 // CHECK12-NEXT: call void @__tgt_register_requires(i64 1) 5943 // CHECK12-NEXT: ret void 5944 // 5945 // 5946 // CHECK13-LABEL: define {{[^@]+}}@main 5947 // CHECK13-SAME: (i32 noundef signext [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] { 5948 // CHECK13-NEXT: entry: 5949 // CHECK13-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 5950 // CHECK13-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 5951 // CHECK13-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 5952 // CHECK13-NEXT: [[N:%.*]] = alloca i32, align 4 5953 // CHECK13-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 5954 // CHECK13-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 5955 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4 5956 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 5957 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 5958 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5959 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5960 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4 5961 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5962 // CHECK13-NEXT: [[I3:%.*]] = alloca i32, align 4 5963 // CHECK13-NEXT: [[_TMP10:%.*]] = alloca i32, align 4 5964 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 5965 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4 5966 // CHECK13-NEXT: [[DOTOMP_LB16:%.*]] = alloca i32, align 4 5967 // CHECK13-NEXT: [[DOTOMP_UB17:%.*]] = alloca i32, align 4 5968 // CHECK13-NEXT: [[I18:%.*]] = alloca i32, align 4 5969 // CHECK13-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4 5970 // CHECK13-NEXT: [[I22:%.*]] = alloca i32, align 4 5971 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 5972 // CHECK13-NEXT: [[_TMP40:%.*]] = alloca i32, align 4 5973 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 5974 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 5975 // CHECK13-NEXT: [[DOTOMP_LB46:%.*]] = alloca i32, align 4 5976 // CHECK13-NEXT: [[DOTOMP_UB47:%.*]] = alloca i32, align 4 5977 // CHECK13-NEXT: [[I48:%.*]] = alloca i32, align 4 5978 // CHECK13-NEXT: [[DOTOMP_IV51:%.*]] = alloca i32, align 4 5979 // CHECK13-NEXT: [[I52:%.*]] = alloca i32, align 4 5980 // CHECK13-NEXT: store i32 0, i32* [[RETVAL]], align 4 5981 // CHECK13-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 5982 // CHECK13-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 5983 // CHECK13-NEXT: store i32 100, i32* [[N]], align 4 5984 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4 5985 // CHECK13-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 5986 // CHECK13-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 5987 // CHECK13-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 5988 // CHECK13-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 5989 // CHECK13-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 5990 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[N]], align 4 5991 // CHECK13-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 5992 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 5993 // CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 5994 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 5995 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 5996 // CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 5997 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5998 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 5999 // CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4 6000 // CHECK13-NEXT: store i32 0, i32* [[I]], align 4 6001 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6002 // CHECK13-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]] 6003 // CHECK13-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] 6004 // CHECK13: simd.if.then: 6005 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6006 // CHECK13-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4 6007 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6008 // CHECK13: omp.inner.for.cond: 6009 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6010 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 6011 // CHECK13-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 6012 // CHECK13-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6013 // CHECK13: omp.inner.for.body: 6014 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6015 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 6016 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6017 // CHECK13-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !2 6018 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2 6019 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 6020 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM]] 6021 // CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !2 6022 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6023 // CHECK13: omp.body.continue: 6024 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6025 // CHECK13: omp.inner.for.inc: 6026 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6027 // CHECK13-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], 1 6028 // CHECK13-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6029 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 6030 // CHECK13: omp.inner.for.end: 6031 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6032 // CHECK13-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP13]], 0 6033 // CHECK13-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 6034 // CHECK13-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1 6035 // CHECK13-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]] 6036 // CHECK13-NEXT: store i32 [[ADD9]], i32* [[I3]], align 4 6037 // CHECK13-NEXT: br label [[SIMD_IF_END]] 6038 // CHECK13: simd.if.end: 6039 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[N]], align 4 6040 // CHECK13-NEXT: store i32 [[TMP14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 6041 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 6042 // CHECK13-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP15]], 0 6043 // CHECK13-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1 6044 // CHECK13-NEXT: [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1 6045 // CHECK13-NEXT: store i32 [[SUB15]], i32* [[DOTCAPTURE_EXPR_12]], align 4 6046 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB16]], align 4 6047 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4 6048 // CHECK13-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_UB17]], align 4 6049 // CHECK13-NEXT: store i32 0, i32* [[I18]], align 4 6050 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 6051 // CHECK13-NEXT: [[CMP19:%.*]] = icmp slt i32 0, [[TMP17]] 6052 // CHECK13-NEXT: br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END38:%.*]] 6053 // CHECK13: simd.if.then20: 6054 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB16]], align 4 6055 // CHECK13-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_IV21]], align 4 6056 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]] 6057 // CHECK13: omp.inner.for.cond23: 6058 // CHECK13-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6 6059 // CHECK13-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB17]], align 4, !llvm.access.group !6 6060 // CHECK13-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]] 6061 // CHECK13-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]] 6062 // CHECK13: omp.inner.for.body25: 6063 // CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6 6064 // CHECK13-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP21]], 1 6065 // CHECK13-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]] 6066 // CHECK13-NEXT: store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !6 6067 // CHECK13-NEXT: [[TMP22:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !6 6068 // CHECK13-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP22]] to i64 6069 // CHECK13-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM28]] 6070 // CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX29]], align 4, !llvm.access.group !6 6071 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE30:%.*]] 6072 // CHECK13: omp.body.continue30: 6073 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC31:%.*]] 6074 // CHECK13: omp.inner.for.inc31: 6075 // CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6 6076 // CHECK13-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP23]], 1 6077 // CHECK13-NEXT: store i32 [[ADD32]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6 6078 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP7:![0-9]+]] 6079 // CHECK13: omp.inner.for.end33: 6080 // CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 6081 // CHECK13-NEXT: [[SUB34:%.*]] = sub nsw i32 [[TMP24]], 0 6082 // CHECK13-NEXT: [[DIV35:%.*]] = sdiv i32 [[SUB34]], 1 6083 // CHECK13-NEXT: [[MUL36:%.*]] = mul nsw i32 [[DIV35]], 1 6084 // CHECK13-NEXT: [[ADD37:%.*]] = add nsw i32 0, [[MUL36]] 6085 // CHECK13-NEXT: store i32 [[ADD37]], i32* [[I22]], align 4 6086 // CHECK13-NEXT: br label [[SIMD_IF_END38]] 6087 // CHECK13: simd.if.end38: 6088 // CHECK13-NEXT: [[TMP25:%.*]] = load i32, i32* [[N]], align 4 6089 // CHECK13-NEXT: store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_39]], align 4 6090 // CHECK13-NEXT: [[TMP26:%.*]] = load i32, i32* [[N]], align 4 6091 // CHECK13-NEXT: store i32 [[TMP26]], i32* [[DOTCAPTURE_EXPR_41]], align 4 6092 // CHECK13-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 6093 // CHECK13-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP27]], 0 6094 // CHECK13-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 6095 // CHECK13-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1 6096 // CHECK13-NEXT: store i32 [[SUB45]], i32* [[DOTCAPTURE_EXPR_42]], align 4 6097 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB46]], align 4 6098 // CHECK13-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 6099 // CHECK13-NEXT: store i32 [[TMP28]], i32* [[DOTOMP_UB47]], align 4 6100 // CHECK13-NEXT: store i32 0, i32* [[I48]], align 4 6101 // CHECK13-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 6102 // CHECK13-NEXT: [[CMP49:%.*]] = icmp slt i32 0, [[TMP29]] 6103 // CHECK13-NEXT: br i1 [[CMP49]], label [[SIMD_IF_THEN50:%.*]], label [[SIMD_IF_END68:%.*]] 6104 // CHECK13: simd.if.then50: 6105 // CHECK13-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_LB46]], align 4 6106 // CHECK13-NEXT: store i32 [[TMP30]], i32* [[DOTOMP_IV51]], align 4 6107 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND53:%.*]] 6108 // CHECK13: omp.inner.for.cond53: 6109 // CHECK13-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9 6110 // CHECK13-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_UB47]], align 4, !llvm.access.group !9 6111 // CHECK13-NEXT: [[CMP54:%.*]] = icmp sle i32 [[TMP31]], [[TMP32]] 6112 // CHECK13-NEXT: br i1 [[CMP54]], label [[OMP_INNER_FOR_BODY55:%.*]], label [[OMP_INNER_FOR_END63:%.*]] 6113 // CHECK13: omp.inner.for.body55: 6114 // CHECK13-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9 6115 // CHECK13-NEXT: [[MUL56:%.*]] = mul nsw i32 [[TMP33]], 1 6116 // CHECK13-NEXT: [[ADD57:%.*]] = add nsw i32 0, [[MUL56]] 6117 // CHECK13-NEXT: store i32 [[ADD57]], i32* [[I52]], align 4, !llvm.access.group !9 6118 // CHECK13-NEXT: [[TMP34:%.*]] = load i32, i32* [[I52]], align 4, !llvm.access.group !9 6119 // CHECK13-NEXT: [[IDXPROM58:%.*]] = sext i32 [[TMP34]] to i64 6120 // CHECK13-NEXT: [[ARRAYIDX59:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM58]] 6121 // CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX59]], align 4, !llvm.access.group !9 6122 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE60:%.*]] 6123 // CHECK13: omp.body.continue60: 6124 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC61:%.*]] 6125 // CHECK13: omp.inner.for.inc61: 6126 // CHECK13-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9 6127 // CHECK13-NEXT: [[ADD62:%.*]] = add nsw i32 [[TMP35]], 1 6128 // CHECK13-NEXT: store i32 [[ADD62]], i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9 6129 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND53]], !llvm.loop [[LOOP10:![0-9]+]] 6130 // CHECK13: omp.inner.for.end63: 6131 // CHECK13-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 6132 // CHECK13-NEXT: [[SUB64:%.*]] = sub nsw i32 [[TMP36]], 0 6133 // CHECK13-NEXT: [[DIV65:%.*]] = sdiv i32 [[SUB64]], 1 6134 // CHECK13-NEXT: [[MUL66:%.*]] = mul nsw i32 [[DIV65]], 1 6135 // CHECK13-NEXT: [[ADD67:%.*]] = add nsw i32 0, [[MUL66]] 6136 // CHECK13-NEXT: store i32 [[ADD67]], i32* [[I52]], align 4 6137 // CHECK13-NEXT: br label [[SIMD_IF_END68]] 6138 // CHECK13: simd.if.end68: 6139 // CHECK13-NEXT: [[TMP37:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 6140 // CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP37]]) 6141 // CHECK13-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 6142 // CHECK13-NEXT: [[TMP38:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 6143 // CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP38]]) 6144 // CHECK13-NEXT: [[TMP39:%.*]] = load i32, i32* [[RETVAL]], align 4 6145 // CHECK13-NEXT: ret i32 [[TMP39]] 6146 // 6147 // 6148 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_ 6149 // CHECK13-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat { 6150 // CHECK13-NEXT: entry: 6151 // CHECK13-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 6152 // CHECK13-NEXT: [[A:%.*]] = alloca [10 x i32], align 4 6153 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4 6154 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6155 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6156 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6157 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4 6158 // CHECK13-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6159 // CHECK13-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6160 // CHECK13-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6161 // CHECK13-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6162 // CHECK13-NEXT: [[I6:%.*]] = alloca i32, align 4 6163 // CHECK13-NEXT: [[_TMP18:%.*]] = alloca i32, align 4 6164 // CHECK13-NEXT: [[DOTOMP_LB19:%.*]] = alloca i32, align 4 6165 // CHECK13-NEXT: [[DOTOMP_UB20:%.*]] = alloca i32, align 4 6166 // CHECK13-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4 6167 // CHECK13-NEXT: [[I22:%.*]] = alloca i32, align 4 6168 // CHECK13-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 6169 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6170 // CHECK13-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 6171 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6172 // CHECK13-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 6173 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6174 // CHECK13: omp.inner.for.cond: 6175 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 6176 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12 6177 // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 6178 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6179 // CHECK13: omp.inner.for.body: 6180 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 6181 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 6182 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6183 // CHECK13-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12 6184 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12 6185 // CHECK13-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64 6186 // CHECK13-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM]] 6187 // CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !12 6188 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6189 // CHECK13: omp.body.continue: 6190 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6191 // CHECK13: omp.inner.for.inc: 6192 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 6193 // CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1 6194 // CHECK13-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 6195 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] 6196 // CHECK13: omp.inner.for.end: 6197 // CHECK13-NEXT: store i32 10, i32* [[I]], align 4 6198 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 6199 // CHECK13-NEXT: store i32 9, i32* [[DOTOMP_UB4]], align 4 6200 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 6201 // CHECK13-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV5]], align 4 6202 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 6203 // CHECK13: omp.inner.for.cond7: 6204 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !15 6205 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !15 6206 // CHECK13-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 6207 // CHECK13-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END17:%.*]] 6208 // CHECK13: omp.inner.for.body9: 6209 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !15 6210 // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1 6211 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 6212 // CHECK13-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !15 6213 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !15 6214 // CHECK13-NEXT: [[IDXPROM12:%.*]] = sext i32 [[TMP10]] to i64 6215 // CHECK13-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM12]] 6216 // CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX13]], align 4, !llvm.access.group !15 6217 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]] 6218 // CHECK13: omp.body.continue14: 6219 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] 6220 // CHECK13: omp.inner.for.inc15: 6221 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !15 6222 // CHECK13-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP11]], 1 6223 // CHECK13-NEXT: store i32 [[ADD16]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !15 6224 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP16:![0-9]+]] 6225 // CHECK13: omp.inner.for.end17: 6226 // CHECK13-NEXT: store i32 10, i32* [[I6]], align 4 6227 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB19]], align 4 6228 // CHECK13-NEXT: store i32 9, i32* [[DOTOMP_UB20]], align 4 6229 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4 6230 // CHECK13-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV21]], align 4 6231 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]] 6232 // CHECK13: omp.inner.for.cond23: 6233 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !18 6234 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !18 6235 // CHECK13-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 6236 // CHECK13-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]] 6237 // CHECK13: omp.inner.for.body25: 6238 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !18 6239 // CHECK13-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP15]], 1 6240 // CHECK13-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]] 6241 // CHECK13-NEXT: store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !18 6242 // CHECK13-NEXT: [[TMP16:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !18 6243 // CHECK13-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP16]] to i64 6244 // CHECK13-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM28]] 6245 // CHECK13-NEXT: store i32 0, i32* [[ARRAYIDX29]], align 4, !llvm.access.group !18 6246 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE30:%.*]] 6247 // CHECK13: omp.body.continue30: 6248 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC31:%.*]] 6249 // CHECK13: omp.inner.for.inc31: 6250 // CHECK13-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !18 6251 // CHECK13-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP17]], 1 6252 // CHECK13-NEXT: store i32 [[ADD32]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !18 6253 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP19:![0-9]+]] 6254 // CHECK13: omp.inner.for.end33: 6255 // CHECK13-NEXT: store i32 10, i32* [[I22]], align 4 6256 // CHECK13-NEXT: ret i32 0 6257 // 6258 // 6259 // CHECK14-LABEL: define {{[^@]+}}@main 6260 // CHECK14-SAME: (i32 noundef signext [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] { 6261 // CHECK14-NEXT: entry: 6262 // CHECK14-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 6263 // CHECK14-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 6264 // CHECK14-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 8 6265 // CHECK14-NEXT: [[N:%.*]] = alloca i32, align 4 6266 // CHECK14-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 6267 // CHECK14-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 6268 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4 6269 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6270 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 6271 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6272 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6273 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4 6274 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6275 // CHECK14-NEXT: [[I3:%.*]] = alloca i32, align 4 6276 // CHECK14-NEXT: [[_TMP10:%.*]] = alloca i32, align 4 6277 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 6278 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4 6279 // CHECK14-NEXT: [[DOTOMP_LB16:%.*]] = alloca i32, align 4 6280 // CHECK14-NEXT: [[DOTOMP_UB17:%.*]] = alloca i32, align 4 6281 // CHECK14-NEXT: [[I18:%.*]] = alloca i32, align 4 6282 // CHECK14-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4 6283 // CHECK14-NEXT: [[I22:%.*]] = alloca i32, align 4 6284 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 6285 // CHECK14-NEXT: [[_TMP40:%.*]] = alloca i32, align 4 6286 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 6287 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 6288 // CHECK14-NEXT: [[DOTOMP_LB46:%.*]] = alloca i32, align 4 6289 // CHECK14-NEXT: [[DOTOMP_UB47:%.*]] = alloca i32, align 4 6290 // CHECK14-NEXT: [[I48:%.*]] = alloca i32, align 4 6291 // CHECK14-NEXT: [[DOTOMP_IV51:%.*]] = alloca i32, align 4 6292 // CHECK14-NEXT: [[I52:%.*]] = alloca i32, align 4 6293 // CHECK14-NEXT: store i32 0, i32* [[RETVAL]], align 4 6294 // CHECK14-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 6295 // CHECK14-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 6296 // CHECK14-NEXT: store i32 100, i32* [[N]], align 4 6297 // CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4 6298 // CHECK14-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64 6299 // CHECK14-NEXT: [[TMP2:%.*]] = call i8* @llvm.stacksave() 6300 // CHECK14-NEXT: store i8* [[TMP2]], i8** [[SAVED_STACK]], align 8 6301 // CHECK14-NEXT: [[VLA:%.*]] = alloca i32, i64 [[TMP1]], align 4 6302 // CHECK14-NEXT: store i64 [[TMP1]], i64* [[__VLA_EXPR0]], align 8 6303 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[N]], align 4 6304 // CHECK14-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR_]], align 4 6305 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6306 // CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0 6307 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 6308 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 6309 // CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 6310 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6311 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 6312 // CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4 6313 // CHECK14-NEXT: store i32 0, i32* [[I]], align 4 6314 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6315 // CHECK14-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP6]] 6316 // CHECK14-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] 6317 // CHECK14: simd.if.then: 6318 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6319 // CHECK14-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4 6320 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6321 // CHECK14: omp.inner.for.cond: 6322 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6323 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 6324 // CHECK14-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 6325 // CHECK14-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6326 // CHECK14: omp.inner.for.body: 6327 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6328 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 6329 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6330 // CHECK14-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !2 6331 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !2 6332 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64 6333 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM]] 6334 // CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !2 6335 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6336 // CHECK14: omp.body.continue: 6337 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6338 // CHECK14: omp.inner.for.inc: 6339 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6340 // CHECK14-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], 1 6341 // CHECK14-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6342 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 6343 // CHECK14: omp.inner.for.end: 6344 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6345 // CHECK14-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP13]], 0 6346 // CHECK14-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 6347 // CHECK14-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1 6348 // CHECK14-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]] 6349 // CHECK14-NEXT: store i32 [[ADD9]], i32* [[I3]], align 4 6350 // CHECK14-NEXT: br label [[SIMD_IF_END]] 6351 // CHECK14: simd.if.end: 6352 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[N]], align 4 6353 // CHECK14-NEXT: store i32 [[TMP14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 6354 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 6355 // CHECK14-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP15]], 0 6356 // CHECK14-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1 6357 // CHECK14-NEXT: [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1 6358 // CHECK14-NEXT: store i32 [[SUB15]], i32* [[DOTCAPTURE_EXPR_12]], align 4 6359 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB16]], align 4 6360 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4 6361 // CHECK14-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_UB17]], align 4 6362 // CHECK14-NEXT: store i32 0, i32* [[I18]], align 4 6363 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 6364 // CHECK14-NEXT: [[CMP19:%.*]] = icmp slt i32 0, [[TMP17]] 6365 // CHECK14-NEXT: br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END38:%.*]] 6366 // CHECK14: simd.if.then20: 6367 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_LB16]], align 4 6368 // CHECK14-NEXT: store i32 [[TMP18]], i32* [[DOTOMP_IV21]], align 4 6369 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]] 6370 // CHECK14: omp.inner.for.cond23: 6371 // CHECK14-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6 6372 // CHECK14-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_UB17]], align 4, !llvm.access.group !6 6373 // CHECK14-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP19]], [[TMP20]] 6374 // CHECK14-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]] 6375 // CHECK14: omp.inner.for.body25: 6376 // CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6 6377 // CHECK14-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP21]], 1 6378 // CHECK14-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]] 6379 // CHECK14-NEXT: store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !6 6380 // CHECK14-NEXT: [[TMP22:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !6 6381 // CHECK14-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP22]] to i64 6382 // CHECK14-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM28]] 6383 // CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX29]], align 4, !llvm.access.group !6 6384 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE30:%.*]] 6385 // CHECK14: omp.body.continue30: 6386 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC31:%.*]] 6387 // CHECK14: omp.inner.for.inc31: 6388 // CHECK14-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6 6389 // CHECK14-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP23]], 1 6390 // CHECK14-NEXT: store i32 [[ADD32]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !6 6391 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP7:![0-9]+]] 6392 // CHECK14: omp.inner.for.end33: 6393 // CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 6394 // CHECK14-NEXT: [[SUB34:%.*]] = sub nsw i32 [[TMP24]], 0 6395 // CHECK14-NEXT: [[DIV35:%.*]] = sdiv i32 [[SUB34]], 1 6396 // CHECK14-NEXT: [[MUL36:%.*]] = mul nsw i32 [[DIV35]], 1 6397 // CHECK14-NEXT: [[ADD37:%.*]] = add nsw i32 0, [[MUL36]] 6398 // CHECK14-NEXT: store i32 [[ADD37]], i32* [[I22]], align 4 6399 // CHECK14-NEXT: br label [[SIMD_IF_END38]] 6400 // CHECK14: simd.if.end38: 6401 // CHECK14-NEXT: [[TMP25:%.*]] = load i32, i32* [[N]], align 4 6402 // CHECK14-NEXT: store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_39]], align 4 6403 // CHECK14-NEXT: [[TMP26:%.*]] = load i32, i32* [[N]], align 4 6404 // CHECK14-NEXT: store i32 [[TMP26]], i32* [[DOTCAPTURE_EXPR_41]], align 4 6405 // CHECK14-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 6406 // CHECK14-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP27]], 0 6407 // CHECK14-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 6408 // CHECK14-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1 6409 // CHECK14-NEXT: store i32 [[SUB45]], i32* [[DOTCAPTURE_EXPR_42]], align 4 6410 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB46]], align 4 6411 // CHECK14-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 6412 // CHECK14-NEXT: store i32 [[TMP28]], i32* [[DOTOMP_UB47]], align 4 6413 // CHECK14-NEXT: store i32 0, i32* [[I48]], align 4 6414 // CHECK14-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 6415 // CHECK14-NEXT: [[CMP49:%.*]] = icmp slt i32 0, [[TMP29]] 6416 // CHECK14-NEXT: br i1 [[CMP49]], label [[SIMD_IF_THEN50:%.*]], label [[SIMD_IF_END68:%.*]] 6417 // CHECK14: simd.if.then50: 6418 // CHECK14-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_LB46]], align 4 6419 // CHECK14-NEXT: store i32 [[TMP30]], i32* [[DOTOMP_IV51]], align 4 6420 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND53:%.*]] 6421 // CHECK14: omp.inner.for.cond53: 6422 // CHECK14-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9 6423 // CHECK14-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_UB47]], align 4, !llvm.access.group !9 6424 // CHECK14-NEXT: [[CMP54:%.*]] = icmp sle i32 [[TMP31]], [[TMP32]] 6425 // CHECK14-NEXT: br i1 [[CMP54]], label [[OMP_INNER_FOR_BODY55:%.*]], label [[OMP_INNER_FOR_END63:%.*]] 6426 // CHECK14: omp.inner.for.body55: 6427 // CHECK14-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9 6428 // CHECK14-NEXT: [[MUL56:%.*]] = mul nsw i32 [[TMP33]], 1 6429 // CHECK14-NEXT: [[ADD57:%.*]] = add nsw i32 0, [[MUL56]] 6430 // CHECK14-NEXT: store i32 [[ADD57]], i32* [[I52]], align 4, !llvm.access.group !9 6431 // CHECK14-NEXT: [[TMP34:%.*]] = load i32, i32* [[I52]], align 4, !llvm.access.group !9 6432 // CHECK14-NEXT: [[IDXPROM58:%.*]] = sext i32 [[TMP34]] to i64 6433 // CHECK14-NEXT: [[ARRAYIDX59:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 [[IDXPROM58]] 6434 // CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX59]], align 4, !llvm.access.group !9 6435 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE60:%.*]] 6436 // CHECK14: omp.body.continue60: 6437 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC61:%.*]] 6438 // CHECK14: omp.inner.for.inc61: 6439 // CHECK14-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9 6440 // CHECK14-NEXT: [[ADD62:%.*]] = add nsw i32 [[TMP35]], 1 6441 // CHECK14-NEXT: store i32 [[ADD62]], i32* [[DOTOMP_IV51]], align 4, !llvm.access.group !9 6442 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND53]], !llvm.loop [[LOOP10:![0-9]+]] 6443 // CHECK14: omp.inner.for.end63: 6444 // CHECK14-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 6445 // CHECK14-NEXT: [[SUB64:%.*]] = sub nsw i32 [[TMP36]], 0 6446 // CHECK14-NEXT: [[DIV65:%.*]] = sdiv i32 [[SUB64]], 1 6447 // CHECK14-NEXT: [[MUL66:%.*]] = mul nsw i32 [[DIV65]], 1 6448 // CHECK14-NEXT: [[ADD67:%.*]] = add nsw i32 0, [[MUL66]] 6449 // CHECK14-NEXT: store i32 [[ADD67]], i32* [[I52]], align 4 6450 // CHECK14-NEXT: br label [[SIMD_IF_END68]] 6451 // CHECK14: simd.if.end68: 6452 // CHECK14-NEXT: [[TMP37:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 6453 // CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP37]]) 6454 // CHECK14-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 6455 // CHECK14-NEXT: [[TMP38:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 6456 // CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP38]]) 6457 // CHECK14-NEXT: [[TMP39:%.*]] = load i32, i32* [[RETVAL]], align 4 6458 // CHECK14-NEXT: ret i32 [[TMP39]] 6459 // 6460 // 6461 // CHECK14-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_ 6462 // CHECK14-SAME: (i32 noundef signext [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat { 6463 // CHECK14-NEXT: entry: 6464 // CHECK14-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 6465 // CHECK14-NEXT: [[A:%.*]] = alloca [10 x i32], align 4 6466 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4 6467 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6468 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6469 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6470 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4 6471 // CHECK14-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6472 // CHECK14-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6473 // CHECK14-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6474 // CHECK14-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6475 // CHECK14-NEXT: [[I6:%.*]] = alloca i32, align 4 6476 // CHECK14-NEXT: [[_TMP18:%.*]] = alloca i32, align 4 6477 // CHECK14-NEXT: [[DOTOMP_LB19:%.*]] = alloca i32, align 4 6478 // CHECK14-NEXT: [[DOTOMP_UB20:%.*]] = alloca i32, align 4 6479 // CHECK14-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4 6480 // CHECK14-NEXT: [[I22:%.*]] = alloca i32, align 4 6481 // CHECK14-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 6482 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6483 // CHECK14-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 6484 // CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6485 // CHECK14-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 6486 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6487 // CHECK14: omp.inner.for.cond: 6488 // CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 6489 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12 6490 // CHECK14-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 6491 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6492 // CHECK14: omp.inner.for.body: 6493 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 6494 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 6495 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6496 // CHECK14-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12 6497 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12 6498 // CHECK14-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP4]] to i64 6499 // CHECK14-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM]] 6500 // CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !12 6501 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6502 // CHECK14: omp.body.continue: 6503 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6504 // CHECK14: omp.inner.for.inc: 6505 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 6506 // CHECK14-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1 6507 // CHECK14-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12 6508 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]] 6509 // CHECK14: omp.inner.for.end: 6510 // CHECK14-NEXT: store i32 10, i32* [[I]], align 4 6511 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 6512 // CHECK14-NEXT: store i32 9, i32* [[DOTOMP_UB4]], align 4 6513 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 6514 // CHECK14-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV5]], align 4 6515 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 6516 // CHECK14: omp.inner.for.cond7: 6517 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !15 6518 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !15 6519 // CHECK14-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 6520 // CHECK14-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END17:%.*]] 6521 // CHECK14: omp.inner.for.body9: 6522 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !15 6523 // CHECK14-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1 6524 // CHECK14-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 6525 // CHECK14-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !15 6526 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !15 6527 // CHECK14-NEXT: [[IDXPROM12:%.*]] = sext i32 [[TMP10]] to i64 6528 // CHECK14-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM12]] 6529 // CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX13]], align 4, !llvm.access.group !15 6530 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE14:%.*]] 6531 // CHECK14: omp.body.continue14: 6532 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC15:%.*]] 6533 // CHECK14: omp.inner.for.inc15: 6534 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !15 6535 // CHECK14-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP11]], 1 6536 // CHECK14-NEXT: store i32 [[ADD16]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !15 6537 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP16:![0-9]+]] 6538 // CHECK14: omp.inner.for.end17: 6539 // CHECK14-NEXT: store i32 10, i32* [[I6]], align 4 6540 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB19]], align 4 6541 // CHECK14-NEXT: store i32 9, i32* [[DOTOMP_UB20]], align 4 6542 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB19]], align 4 6543 // CHECK14-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV21]], align 4 6544 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]] 6545 // CHECK14: omp.inner.for.cond23: 6546 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !18 6547 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB20]], align 4, !llvm.access.group !18 6548 // CHECK14-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 6549 // CHECK14-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END33:%.*]] 6550 // CHECK14: omp.inner.for.body25: 6551 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !18 6552 // CHECK14-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP15]], 1 6553 // CHECK14-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]] 6554 // CHECK14-NEXT: store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !18 6555 // CHECK14-NEXT: [[TMP16:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !18 6556 // CHECK14-NEXT: [[IDXPROM28:%.*]] = sext i32 [[TMP16]] to i64 6557 // CHECK14-NEXT: [[ARRAYIDX29:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 [[IDXPROM28]] 6558 // CHECK14-NEXT: store i32 0, i32* [[ARRAYIDX29]], align 4, !llvm.access.group !18 6559 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE30:%.*]] 6560 // CHECK14: omp.body.continue30: 6561 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC31:%.*]] 6562 // CHECK14: omp.inner.for.inc31: 6563 // CHECK14-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !18 6564 // CHECK14-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP17]], 1 6565 // CHECK14-NEXT: store i32 [[ADD32]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !18 6566 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP19:![0-9]+]] 6567 // CHECK14: omp.inner.for.end33: 6568 // CHECK14-NEXT: store i32 10, i32* [[I22]], align 4 6569 // CHECK14-NEXT: ret i32 0 6570 // 6571 // 6572 // CHECK15-LABEL: define {{[^@]+}}@main 6573 // CHECK15-SAME: (i32 noundef [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] { 6574 // CHECK15-NEXT: entry: 6575 // CHECK15-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 6576 // CHECK15-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 6577 // CHECK15-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 4 6578 // CHECK15-NEXT: [[N:%.*]] = alloca i32, align 4 6579 // CHECK15-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 6580 // CHECK15-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 6581 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 6582 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6583 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 6584 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6585 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6586 // CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4 6587 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6588 // CHECK15-NEXT: [[I3:%.*]] = alloca i32, align 4 6589 // CHECK15-NEXT: [[_TMP10:%.*]] = alloca i32, align 4 6590 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 6591 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4 6592 // CHECK15-NEXT: [[DOTOMP_LB16:%.*]] = alloca i32, align 4 6593 // CHECK15-NEXT: [[DOTOMP_UB17:%.*]] = alloca i32, align 4 6594 // CHECK15-NEXT: [[I18:%.*]] = alloca i32, align 4 6595 // CHECK15-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4 6596 // CHECK15-NEXT: [[I22:%.*]] = alloca i32, align 4 6597 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 6598 // CHECK15-NEXT: [[_TMP39:%.*]] = alloca i32, align 4 6599 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4 6600 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 6601 // CHECK15-NEXT: [[DOTOMP_LB45:%.*]] = alloca i32, align 4 6602 // CHECK15-NEXT: [[DOTOMP_UB46:%.*]] = alloca i32, align 4 6603 // CHECK15-NEXT: [[I47:%.*]] = alloca i32, align 4 6604 // CHECK15-NEXT: [[DOTOMP_IV50:%.*]] = alloca i32, align 4 6605 // CHECK15-NEXT: [[I51:%.*]] = alloca i32, align 4 6606 // CHECK15-NEXT: store i32 0, i32* [[RETVAL]], align 4 6607 // CHECK15-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 6608 // CHECK15-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 6609 // CHECK15-NEXT: store i32 100, i32* [[N]], align 4 6610 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4 6611 // CHECK15-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() 6612 // CHECK15-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4 6613 // CHECK15-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 6614 // CHECK15-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4 6615 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N]], align 4 6616 // CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4 6617 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6618 // CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0 6619 // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 6620 // CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 6621 // CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 6622 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6623 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 6624 // CHECK15-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4 6625 // CHECK15-NEXT: store i32 0, i32* [[I]], align 4 6626 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6627 // CHECK15-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 6628 // CHECK15-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] 6629 // CHECK15: simd.if.then: 6630 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6631 // CHECK15-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 6632 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6633 // CHECK15: omp.inner.for.cond: 6634 // CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 6635 // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3 6636 // CHECK15-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 6637 // CHECK15-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6638 // CHECK15: omp.inner.for.body: 6639 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 6640 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 6641 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6642 // CHECK15-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !3 6643 // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3 6644 // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP10]] 6645 // CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !3 6646 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6647 // CHECK15: omp.body.continue: 6648 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6649 // CHECK15: omp.inner.for.inc: 6650 // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 6651 // CHECK15-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 6652 // CHECK15-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 6653 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 6654 // CHECK15: omp.inner.for.end: 6655 // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6656 // CHECK15-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP12]], 0 6657 // CHECK15-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 6658 // CHECK15-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1 6659 // CHECK15-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]] 6660 // CHECK15-NEXT: store i32 [[ADD9]], i32* [[I3]], align 4 6661 // CHECK15-NEXT: br label [[SIMD_IF_END]] 6662 // CHECK15: simd.if.end: 6663 // CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[N]], align 4 6664 // CHECK15-NEXT: store i32 [[TMP13]], i32* [[DOTCAPTURE_EXPR_11]], align 4 6665 // CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 6666 // CHECK15-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP14]], 0 6667 // CHECK15-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1 6668 // CHECK15-NEXT: [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1 6669 // CHECK15-NEXT: store i32 [[SUB15]], i32* [[DOTCAPTURE_EXPR_12]], align 4 6670 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB16]], align 4 6671 // CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4 6672 // CHECK15-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_UB17]], align 4 6673 // CHECK15-NEXT: store i32 0, i32* [[I18]], align 4 6674 // CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 6675 // CHECK15-NEXT: [[CMP19:%.*]] = icmp slt i32 0, [[TMP16]] 6676 // CHECK15-NEXT: br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END37:%.*]] 6677 // CHECK15: simd.if.then20: 6678 // CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB16]], align 4 6679 // CHECK15-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV21]], align 4 6680 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]] 6681 // CHECK15: omp.inner.for.cond23: 6682 // CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7 6683 // CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB17]], align 4, !llvm.access.group !7 6684 // CHECK15-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]] 6685 // CHECK15-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END32:%.*]] 6686 // CHECK15: omp.inner.for.body25: 6687 // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7 6688 // CHECK15-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP20]], 1 6689 // CHECK15-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]] 6690 // CHECK15-NEXT: store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !7 6691 // CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !7 6692 // CHECK15-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP21]] 6693 // CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX28]], align 4, !llvm.access.group !7 6694 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE29:%.*]] 6695 // CHECK15: omp.body.continue29: 6696 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC30:%.*]] 6697 // CHECK15: omp.inner.for.inc30: 6698 // CHECK15-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7 6699 // CHECK15-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP22]], 1 6700 // CHECK15-NEXT: store i32 [[ADD31]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7 6701 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP8:![0-9]+]] 6702 // CHECK15: omp.inner.for.end32: 6703 // CHECK15-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 6704 // CHECK15-NEXT: [[SUB33:%.*]] = sub nsw i32 [[TMP23]], 0 6705 // CHECK15-NEXT: [[DIV34:%.*]] = sdiv i32 [[SUB33]], 1 6706 // CHECK15-NEXT: [[MUL35:%.*]] = mul nsw i32 [[DIV34]], 1 6707 // CHECK15-NEXT: [[ADD36:%.*]] = add nsw i32 0, [[MUL35]] 6708 // CHECK15-NEXT: store i32 [[ADD36]], i32* [[I22]], align 4 6709 // CHECK15-NEXT: br label [[SIMD_IF_END37]] 6710 // CHECK15: simd.if.end37: 6711 // CHECK15-NEXT: [[TMP24:%.*]] = load i32, i32* [[N]], align 4 6712 // CHECK15-NEXT: store i32 [[TMP24]], i32* [[DOTCAPTURE_EXPR_38]], align 4 6713 // CHECK15-NEXT: [[TMP25:%.*]] = load i32, i32* [[N]], align 4 6714 // CHECK15-NEXT: store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_40]], align 4 6715 // CHECK15-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 6716 // CHECK15-NEXT: [[SUB42:%.*]] = sub nsw i32 [[TMP26]], 0 6717 // CHECK15-NEXT: [[DIV43:%.*]] = sdiv i32 [[SUB42]], 1 6718 // CHECK15-NEXT: [[SUB44:%.*]] = sub nsw i32 [[DIV43]], 1 6719 // CHECK15-NEXT: store i32 [[SUB44]], i32* [[DOTCAPTURE_EXPR_41]], align 4 6720 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB45]], align 4 6721 // CHECK15-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 6722 // CHECK15-NEXT: store i32 [[TMP27]], i32* [[DOTOMP_UB46]], align 4 6723 // CHECK15-NEXT: store i32 0, i32* [[I47]], align 4 6724 // CHECK15-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 6725 // CHECK15-NEXT: [[CMP48:%.*]] = icmp slt i32 0, [[TMP28]] 6726 // CHECK15-NEXT: br i1 [[CMP48]], label [[SIMD_IF_THEN49:%.*]], label [[SIMD_IF_END66:%.*]] 6727 // CHECK15: simd.if.then49: 6728 // CHECK15-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_LB45]], align 4 6729 // CHECK15-NEXT: store i32 [[TMP29]], i32* [[DOTOMP_IV50]], align 4 6730 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND52:%.*]] 6731 // CHECK15: omp.inner.for.cond52: 6732 // CHECK15-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10 6733 // CHECK15-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_UB46]], align 4, !llvm.access.group !10 6734 // CHECK15-NEXT: [[CMP53:%.*]] = icmp sle i32 [[TMP30]], [[TMP31]] 6735 // CHECK15-NEXT: br i1 [[CMP53]], label [[OMP_INNER_FOR_BODY54:%.*]], label [[OMP_INNER_FOR_END61:%.*]] 6736 // CHECK15: omp.inner.for.body54: 6737 // CHECK15-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10 6738 // CHECK15-NEXT: [[MUL55:%.*]] = mul nsw i32 [[TMP32]], 1 6739 // CHECK15-NEXT: [[ADD56:%.*]] = add nsw i32 0, [[MUL55]] 6740 // CHECK15-NEXT: store i32 [[ADD56]], i32* [[I51]], align 4, !llvm.access.group !10 6741 // CHECK15-NEXT: [[TMP33:%.*]] = load i32, i32* [[I51]], align 4, !llvm.access.group !10 6742 // CHECK15-NEXT: [[ARRAYIDX57:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP33]] 6743 // CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX57]], align 4, !llvm.access.group !10 6744 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE58:%.*]] 6745 // CHECK15: omp.body.continue58: 6746 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC59:%.*]] 6747 // CHECK15: omp.inner.for.inc59: 6748 // CHECK15-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10 6749 // CHECK15-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP34]], 1 6750 // CHECK15-NEXT: store i32 [[ADD60]], i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10 6751 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND52]], !llvm.loop [[LOOP11:![0-9]+]] 6752 // CHECK15: omp.inner.for.end61: 6753 // CHECK15-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 6754 // CHECK15-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP35]], 0 6755 // CHECK15-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1 6756 // CHECK15-NEXT: [[MUL64:%.*]] = mul nsw i32 [[DIV63]], 1 6757 // CHECK15-NEXT: [[ADD65:%.*]] = add nsw i32 0, [[MUL64]] 6758 // CHECK15-NEXT: store i32 [[ADD65]], i32* [[I51]], align 4 6759 // CHECK15-NEXT: br label [[SIMD_IF_END66]] 6760 // CHECK15: simd.if.end66: 6761 // CHECK15-NEXT: [[TMP36:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 6762 // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP36]]) 6763 // CHECK15-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 6764 // CHECK15-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 6765 // CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) 6766 // CHECK15-NEXT: [[TMP38:%.*]] = load i32, i32* [[RETVAL]], align 4 6767 // CHECK15-NEXT: ret i32 [[TMP38]] 6768 // 6769 // 6770 // CHECK15-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_ 6771 // CHECK15-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat { 6772 // CHECK15-NEXT: entry: 6773 // CHECK15-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 6774 // CHECK15-NEXT: [[A:%.*]] = alloca [10 x i32], align 4 6775 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 6776 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6777 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6778 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6779 // CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4 6780 // CHECK15-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6781 // CHECK15-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6782 // CHECK15-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6783 // CHECK15-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6784 // CHECK15-NEXT: [[I6:%.*]] = alloca i32, align 4 6785 // CHECK15-NEXT: [[_TMP17:%.*]] = alloca i32, align 4 6786 // CHECK15-NEXT: [[DOTOMP_LB18:%.*]] = alloca i32, align 4 6787 // CHECK15-NEXT: [[DOTOMP_UB19:%.*]] = alloca i32, align 4 6788 // CHECK15-NEXT: [[DOTOMP_IV20:%.*]] = alloca i32, align 4 6789 // CHECK15-NEXT: [[I21:%.*]] = alloca i32, align 4 6790 // CHECK15-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 6791 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6792 // CHECK15-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 6793 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6794 // CHECK15-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 6795 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6796 // CHECK15: omp.inner.for.cond: 6797 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 6798 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13 6799 // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 6800 // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6801 // CHECK15: omp.inner.for.body: 6802 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 6803 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 6804 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6805 // CHECK15-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13 6806 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13 6807 // CHECK15-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP4]] 6808 // CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13 6809 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6810 // CHECK15: omp.body.continue: 6811 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6812 // CHECK15: omp.inner.for.inc: 6813 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 6814 // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1 6815 // CHECK15-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 6816 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] 6817 // CHECK15: omp.inner.for.end: 6818 // CHECK15-NEXT: store i32 10, i32* [[I]], align 4 6819 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 6820 // CHECK15-NEXT: store i32 9, i32* [[DOTOMP_UB4]], align 4 6821 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 6822 // CHECK15-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV5]], align 4 6823 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 6824 // CHECK15: omp.inner.for.cond7: 6825 // CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 6826 // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !16 6827 // CHECK15-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 6828 // CHECK15-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]] 6829 // CHECK15: omp.inner.for.body9: 6830 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 6831 // CHECK15-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1 6832 // CHECK15-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 6833 // CHECK15-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !16 6834 // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !16 6835 // CHECK15-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP10]] 6836 // CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX12]], align 4, !llvm.access.group !16 6837 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]] 6838 // CHECK15: omp.body.continue13: 6839 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]] 6840 // CHECK15: omp.inner.for.inc14: 6841 // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 6842 // CHECK15-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP11]], 1 6843 // CHECK15-NEXT: store i32 [[ADD15]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 6844 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP17:![0-9]+]] 6845 // CHECK15: omp.inner.for.end16: 6846 // CHECK15-NEXT: store i32 10, i32* [[I6]], align 4 6847 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB18]], align 4 6848 // CHECK15-NEXT: store i32 9, i32* [[DOTOMP_UB19]], align 4 6849 // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB18]], align 4 6850 // CHECK15-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV20]], align 4 6851 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND22:%.*]] 6852 // CHECK15: omp.inner.for.cond22: 6853 // CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !19 6854 // CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB19]], align 4, !llvm.access.group !19 6855 // CHECK15-NEXT: [[CMP23:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 6856 // CHECK15-NEXT: br i1 [[CMP23]], label [[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END31:%.*]] 6857 // CHECK15: omp.inner.for.body24: 6858 // CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !19 6859 // CHECK15-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP15]], 1 6860 // CHECK15-NEXT: [[ADD26:%.*]] = add nsw i32 0, [[MUL25]] 6861 // CHECK15-NEXT: store i32 [[ADD26]], i32* [[I21]], align 4, !llvm.access.group !19 6862 // CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[I21]], align 4, !llvm.access.group !19 6863 // CHECK15-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP16]] 6864 // CHECK15-NEXT: store i32 0, i32* [[ARRAYIDX27]], align 4, !llvm.access.group !19 6865 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE28:%.*]] 6866 // CHECK15: omp.body.continue28: 6867 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC29:%.*]] 6868 // CHECK15: omp.inner.for.inc29: 6869 // CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !19 6870 // CHECK15-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP17]], 1 6871 // CHECK15-NEXT: store i32 [[ADD30]], i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !19 6872 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP20:![0-9]+]] 6873 // CHECK15: omp.inner.for.end31: 6874 // CHECK15-NEXT: store i32 10, i32* [[I21]], align 4 6875 // CHECK15-NEXT: ret i32 0 6876 // 6877 // 6878 // CHECK16-LABEL: define {{[^@]+}}@main 6879 // CHECK16-SAME: (i32 noundef [[ARGC:%.*]], i8** noundef [[ARGV:%.*]]) #[[ATTR0:[0-9]+]] { 6880 // CHECK16-NEXT: entry: 6881 // CHECK16-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 6882 // CHECK16-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 6883 // CHECK16-NEXT: [[ARGV_ADDR:%.*]] = alloca i8**, align 4 6884 // CHECK16-NEXT: [[N:%.*]] = alloca i32, align 4 6885 // CHECK16-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 4 6886 // CHECK16-NEXT: [[__VLA_EXPR0:%.*]] = alloca i32, align 4 6887 // CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4 6888 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 6889 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 6890 // CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6891 // CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6892 // CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4 6893 // CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6894 // CHECK16-NEXT: [[I3:%.*]] = alloca i32, align 4 6895 // CHECK16-NEXT: [[_TMP10:%.*]] = alloca i32, align 4 6896 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 6897 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4 6898 // CHECK16-NEXT: [[DOTOMP_LB16:%.*]] = alloca i32, align 4 6899 // CHECK16-NEXT: [[DOTOMP_UB17:%.*]] = alloca i32, align 4 6900 // CHECK16-NEXT: [[I18:%.*]] = alloca i32, align 4 6901 // CHECK16-NEXT: [[DOTOMP_IV21:%.*]] = alloca i32, align 4 6902 // CHECK16-NEXT: [[I22:%.*]] = alloca i32, align 4 6903 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 6904 // CHECK16-NEXT: [[_TMP39:%.*]] = alloca i32, align 4 6905 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4 6906 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 6907 // CHECK16-NEXT: [[DOTOMP_LB45:%.*]] = alloca i32, align 4 6908 // CHECK16-NEXT: [[DOTOMP_UB46:%.*]] = alloca i32, align 4 6909 // CHECK16-NEXT: [[I47:%.*]] = alloca i32, align 4 6910 // CHECK16-NEXT: [[DOTOMP_IV50:%.*]] = alloca i32, align 4 6911 // CHECK16-NEXT: [[I51:%.*]] = alloca i32, align 4 6912 // CHECK16-NEXT: store i32 0, i32* [[RETVAL]], align 4 6913 // CHECK16-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 6914 // CHECK16-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 6915 // CHECK16-NEXT: store i32 100, i32* [[N]], align 4 6916 // CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[N]], align 4 6917 // CHECK16-NEXT: [[TMP1:%.*]] = call i8* @llvm.stacksave() 6918 // CHECK16-NEXT: store i8* [[TMP1]], i8** [[SAVED_STACK]], align 4 6919 // CHECK16-NEXT: [[VLA:%.*]] = alloca i32, i32 [[TMP0]], align 4 6920 // CHECK16-NEXT: store i32 [[TMP0]], i32* [[__VLA_EXPR0]], align 4 6921 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N]], align 4 6922 // CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR_]], align 4 6923 // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6924 // CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0 6925 // CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 6926 // CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 6927 // CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 6928 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6929 // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 6930 // CHECK16-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_UB]], align 4 6931 // CHECK16-NEXT: store i32 0, i32* [[I]], align 4 6932 // CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6933 // CHECK16-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]] 6934 // CHECK16-NEXT: br i1 [[CMP]], label [[SIMD_IF_THEN:%.*]], label [[SIMD_IF_END:%.*]] 6935 // CHECK16: simd.if.then: 6936 // CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6937 // CHECK16-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 6938 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6939 // CHECK16: omp.inner.for.cond: 6940 // CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 6941 // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3 6942 // CHECK16-NEXT: [[CMP4:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 6943 // CHECK16-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6944 // CHECK16: omp.inner.for.body: 6945 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 6946 // CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 6947 // CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6948 // CHECK16-NEXT: store i32 [[ADD]], i32* [[I3]], align 4, !llvm.access.group !3 6949 // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[I3]], align 4, !llvm.access.group !3 6950 // CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP10]] 6951 // CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !3 6952 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6953 // CHECK16: omp.body.continue: 6954 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6955 // CHECK16: omp.inner.for.inc: 6956 // CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 6957 // CHECK16-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP11]], 1 6958 // CHECK16-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3 6959 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]] 6960 // CHECK16: omp.inner.for.end: 6961 // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 6962 // CHECK16-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP12]], 0 6963 // CHECK16-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 6964 // CHECK16-NEXT: [[MUL8:%.*]] = mul nsw i32 [[DIV7]], 1 6965 // CHECK16-NEXT: [[ADD9:%.*]] = add nsw i32 0, [[MUL8]] 6966 // CHECK16-NEXT: store i32 [[ADD9]], i32* [[I3]], align 4 6967 // CHECK16-NEXT: br label [[SIMD_IF_END]] 6968 // CHECK16: simd.if.end: 6969 // CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[N]], align 4 6970 // CHECK16-NEXT: store i32 [[TMP13]], i32* [[DOTCAPTURE_EXPR_11]], align 4 6971 // CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 6972 // CHECK16-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP14]], 0 6973 // CHECK16-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1 6974 // CHECK16-NEXT: [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1 6975 // CHECK16-NEXT: store i32 [[SUB15]], i32* [[DOTCAPTURE_EXPR_12]], align 4 6976 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB16]], align 4 6977 // CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4 6978 // CHECK16-NEXT: store i32 [[TMP15]], i32* [[DOTOMP_UB17]], align 4 6979 // CHECK16-NEXT: store i32 0, i32* [[I18]], align 4 6980 // CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 6981 // CHECK16-NEXT: [[CMP19:%.*]] = icmp slt i32 0, [[TMP16]] 6982 // CHECK16-NEXT: br i1 [[CMP19]], label [[SIMD_IF_THEN20:%.*]], label [[SIMD_IF_END37:%.*]] 6983 // CHECK16: simd.if.then20: 6984 // CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_LB16]], align 4 6985 // CHECK16-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV21]], align 4 6986 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND23:%.*]] 6987 // CHECK16: omp.inner.for.cond23: 6988 // CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7 6989 // CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_UB17]], align 4, !llvm.access.group !7 6990 // CHECK16-NEXT: [[CMP24:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]] 6991 // CHECK16-NEXT: br i1 [[CMP24]], label [[OMP_INNER_FOR_BODY25:%.*]], label [[OMP_INNER_FOR_END32:%.*]] 6992 // CHECK16: omp.inner.for.body25: 6993 // CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7 6994 // CHECK16-NEXT: [[MUL26:%.*]] = mul nsw i32 [[TMP20]], 1 6995 // CHECK16-NEXT: [[ADD27:%.*]] = add nsw i32 0, [[MUL26]] 6996 // CHECK16-NEXT: store i32 [[ADD27]], i32* [[I22]], align 4, !llvm.access.group !7 6997 // CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[I22]], align 4, !llvm.access.group !7 6998 // CHECK16-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP21]] 6999 // CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX28]], align 4, !llvm.access.group !7 7000 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE29:%.*]] 7001 // CHECK16: omp.body.continue29: 7002 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC30:%.*]] 7003 // CHECK16: omp.inner.for.inc30: 7004 // CHECK16-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7 7005 // CHECK16-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP22]], 1 7006 // CHECK16-NEXT: store i32 [[ADD31]], i32* [[DOTOMP_IV21]], align 4, !llvm.access.group !7 7007 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND23]], !llvm.loop [[LOOP8:![0-9]+]] 7008 // CHECK16: omp.inner.for.end32: 7009 // CHECK16-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 7010 // CHECK16-NEXT: [[SUB33:%.*]] = sub nsw i32 [[TMP23]], 0 7011 // CHECK16-NEXT: [[DIV34:%.*]] = sdiv i32 [[SUB33]], 1 7012 // CHECK16-NEXT: [[MUL35:%.*]] = mul nsw i32 [[DIV34]], 1 7013 // CHECK16-NEXT: [[ADD36:%.*]] = add nsw i32 0, [[MUL35]] 7014 // CHECK16-NEXT: store i32 [[ADD36]], i32* [[I22]], align 4 7015 // CHECK16-NEXT: br label [[SIMD_IF_END37]] 7016 // CHECK16: simd.if.end37: 7017 // CHECK16-NEXT: [[TMP24:%.*]] = load i32, i32* [[N]], align 4 7018 // CHECK16-NEXT: store i32 [[TMP24]], i32* [[DOTCAPTURE_EXPR_38]], align 4 7019 // CHECK16-NEXT: [[TMP25:%.*]] = load i32, i32* [[N]], align 4 7020 // CHECK16-NEXT: store i32 [[TMP25]], i32* [[DOTCAPTURE_EXPR_40]], align 4 7021 // CHECK16-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 7022 // CHECK16-NEXT: [[SUB42:%.*]] = sub nsw i32 [[TMP26]], 0 7023 // CHECK16-NEXT: [[DIV43:%.*]] = sdiv i32 [[SUB42]], 1 7024 // CHECK16-NEXT: [[SUB44:%.*]] = sub nsw i32 [[DIV43]], 1 7025 // CHECK16-NEXT: store i32 [[SUB44]], i32* [[DOTCAPTURE_EXPR_41]], align 4 7026 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB45]], align 4 7027 // CHECK16-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 7028 // CHECK16-NEXT: store i32 [[TMP27]], i32* [[DOTOMP_UB46]], align 4 7029 // CHECK16-NEXT: store i32 0, i32* [[I47]], align 4 7030 // CHECK16-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 7031 // CHECK16-NEXT: [[CMP48:%.*]] = icmp slt i32 0, [[TMP28]] 7032 // CHECK16-NEXT: br i1 [[CMP48]], label [[SIMD_IF_THEN49:%.*]], label [[SIMD_IF_END66:%.*]] 7033 // CHECK16: simd.if.then49: 7034 // CHECK16-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_LB45]], align 4 7035 // CHECK16-NEXT: store i32 [[TMP29]], i32* [[DOTOMP_IV50]], align 4 7036 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND52:%.*]] 7037 // CHECK16: omp.inner.for.cond52: 7038 // CHECK16-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10 7039 // CHECK16-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_UB46]], align 4, !llvm.access.group !10 7040 // CHECK16-NEXT: [[CMP53:%.*]] = icmp sle i32 [[TMP30]], [[TMP31]] 7041 // CHECK16-NEXT: br i1 [[CMP53]], label [[OMP_INNER_FOR_BODY54:%.*]], label [[OMP_INNER_FOR_END61:%.*]] 7042 // CHECK16: omp.inner.for.body54: 7043 // CHECK16-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10 7044 // CHECK16-NEXT: [[MUL55:%.*]] = mul nsw i32 [[TMP32]], 1 7045 // CHECK16-NEXT: [[ADD56:%.*]] = add nsw i32 0, [[MUL55]] 7046 // CHECK16-NEXT: store i32 [[ADD56]], i32* [[I51]], align 4, !llvm.access.group !10 7047 // CHECK16-NEXT: [[TMP33:%.*]] = load i32, i32* [[I51]], align 4, !llvm.access.group !10 7048 // CHECK16-NEXT: [[ARRAYIDX57:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 [[TMP33]] 7049 // CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX57]], align 4, !llvm.access.group !10 7050 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE58:%.*]] 7051 // CHECK16: omp.body.continue58: 7052 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC59:%.*]] 7053 // CHECK16: omp.inner.for.inc59: 7054 // CHECK16-NEXT: [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10 7055 // CHECK16-NEXT: [[ADD60:%.*]] = add nsw i32 [[TMP34]], 1 7056 // CHECK16-NEXT: store i32 [[ADD60]], i32* [[DOTOMP_IV50]], align 4, !llvm.access.group !10 7057 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND52]], !llvm.loop [[LOOP11:![0-9]+]] 7058 // CHECK16: omp.inner.for.end61: 7059 // CHECK16-NEXT: [[TMP35:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 7060 // CHECK16-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP35]], 0 7061 // CHECK16-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1 7062 // CHECK16-NEXT: [[MUL64:%.*]] = mul nsw i32 [[DIV63]], 1 7063 // CHECK16-NEXT: [[ADD65:%.*]] = add nsw i32 0, [[MUL64]] 7064 // CHECK16-NEXT: store i32 [[ADD65]], i32* [[I51]], align 4 7065 // CHECK16-NEXT: br label [[SIMD_IF_END66]] 7066 // CHECK16: simd.if.end66: 7067 // CHECK16-NEXT: [[TMP36:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 7068 // CHECK16-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP36]]) 7069 // CHECK16-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 7070 // CHECK16-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 7071 // CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) 7072 // CHECK16-NEXT: [[TMP38:%.*]] = load i32, i32* [[RETVAL]], align 4 7073 // CHECK16-NEXT: ret i32 [[TMP38]] 7074 // 7075 // 7076 // CHECK16-LABEL: define {{[^@]+}}@_Z5tmainIiLi10EEiT_ 7077 // CHECK16-SAME: (i32 noundef [[ARGC:%.*]]) #[[ATTR2:[0-9]+]] comdat { 7078 // CHECK16-NEXT: entry: 7079 // CHECK16-NEXT: [[ARGC_ADDR:%.*]] = alloca i32, align 4 7080 // CHECK16-NEXT: [[A:%.*]] = alloca [10 x i32], align 4 7081 // CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4 7082 // CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 7083 // CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 7084 // CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7085 // CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4 7086 // CHECK16-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 7087 // CHECK16-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 7088 // CHECK16-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 7089 // CHECK16-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 7090 // CHECK16-NEXT: [[I6:%.*]] = alloca i32, align 4 7091 // CHECK16-NEXT: [[_TMP17:%.*]] = alloca i32, align 4 7092 // CHECK16-NEXT: [[DOTOMP_LB18:%.*]] = alloca i32, align 4 7093 // CHECK16-NEXT: [[DOTOMP_UB19:%.*]] = alloca i32, align 4 7094 // CHECK16-NEXT: [[DOTOMP_IV20:%.*]] = alloca i32, align 4 7095 // CHECK16-NEXT: [[I21:%.*]] = alloca i32, align 4 7096 // CHECK16-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 7097 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 7098 // CHECK16-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 7099 // CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 7100 // CHECK16-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 7101 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7102 // CHECK16: omp.inner.for.cond: 7103 // CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 7104 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13 7105 // CHECK16-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 7106 // CHECK16-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7107 // CHECK16: omp.inner.for.body: 7108 // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 7109 // CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 7110 // CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 7111 // CHECK16-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13 7112 // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !13 7113 // CHECK16-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP4]] 7114 // CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX]], align 4, !llvm.access.group !13 7115 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 7116 // CHECK16: omp.body.continue: 7117 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7118 // CHECK16: omp.inner.for.inc: 7119 // CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 7120 // CHECK16-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], 1 7121 // CHECK16-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 7122 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] 7123 // CHECK16: omp.inner.for.end: 7124 // CHECK16-NEXT: store i32 10, i32* [[I]], align 4 7125 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 7126 // CHECK16-NEXT: store i32 9, i32* [[DOTOMP_UB4]], align 4 7127 // CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 7128 // CHECK16-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV5]], align 4 7129 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 7130 // CHECK16: omp.inner.for.cond7: 7131 // CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 7132 // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !16 7133 // CHECK16-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 7134 // CHECK16-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END16:%.*]] 7135 // CHECK16: omp.inner.for.body9: 7136 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 7137 // CHECK16-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP9]], 1 7138 // CHECK16-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 7139 // CHECK16-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !16 7140 // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[I6]], align 4, !llvm.access.group !16 7141 // CHECK16-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP10]] 7142 // CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX12]], align 4, !llvm.access.group !16 7143 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE13:%.*]] 7144 // CHECK16: omp.body.continue13: 7145 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC14:%.*]] 7146 // CHECK16: omp.inner.for.inc14: 7147 // CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 7148 // CHECK16-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP11]], 1 7149 // CHECK16-NEXT: store i32 [[ADD15]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 7150 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP17:![0-9]+]] 7151 // CHECK16: omp.inner.for.end16: 7152 // CHECK16-NEXT: store i32 10, i32* [[I6]], align 4 7153 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB18]], align 4 7154 // CHECK16-NEXT: store i32 9, i32* [[DOTOMP_UB19]], align 4 7155 // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_LB18]], align 4 7156 // CHECK16-NEXT: store i32 [[TMP12]], i32* [[DOTOMP_IV20]], align 4 7157 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND22:%.*]] 7158 // CHECK16: omp.inner.for.cond22: 7159 // CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !19 7160 // CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB19]], align 4, !llvm.access.group !19 7161 // CHECK16-NEXT: [[CMP23:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 7162 // CHECK16-NEXT: br i1 [[CMP23]], label [[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END31:%.*]] 7163 // CHECK16: omp.inner.for.body24: 7164 // CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !19 7165 // CHECK16-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP15]], 1 7166 // CHECK16-NEXT: [[ADD26:%.*]] = add nsw i32 0, [[MUL25]] 7167 // CHECK16-NEXT: store i32 [[ADD26]], i32* [[I21]], align 4, !llvm.access.group !19 7168 // CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[I21]], align 4, !llvm.access.group !19 7169 // CHECK16-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 [[TMP16]] 7170 // CHECK16-NEXT: store i32 0, i32* [[ARRAYIDX27]], align 4, !llvm.access.group !19 7171 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE28:%.*]] 7172 // CHECK16: omp.body.continue28: 7173 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC29:%.*]] 7174 // CHECK16: omp.inner.for.inc29: 7175 // CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !19 7176 // CHECK16-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP17]], 1 7177 // CHECK16-NEXT: store i32 [[ADD30]], i32* [[DOTOMP_IV20]], align 4, !llvm.access.group !19 7178 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP20:![0-9]+]] 7179 // CHECK16: omp.inner.for.end31: 7180 // CHECK16-NEXT: store i32 10, i32* [[I21]], align 4 7181 // CHECK16-NEXT: ret i32 0 7182 // 7183