// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple x86_64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple x86_64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK13
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=45 -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK14
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK15
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -std=c++11 -triple aarch64-unknown-unknown -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-targets=powerpc64le-ibm-linux-gnu -x c++ -triple aarch64-unknown-unknown -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK16
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

void fn1();
void fn2();
void fn3();
void fn4();
void fn5();
void fn6();

int Arg;

void gtid_test() {
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd
  for(int i = 0 ; i < 100; i++) {}

#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd if (parallel: false)
  for(int i = 0 ; i < 100; i++) {
    gtid_test();
  }
}


template <typename T>
int tmain(T Arg) {
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd if (true)
  for(int i = 0 ; i < 100; i++) {
    fn1();
  }
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd if (false)
  for(int i = 0 ; i < 100; i++) {
    fn2();
  }
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd if (parallel: Arg)
  for(int i = 0 ; i < 100; i++) {
    fn3();
  }
  return 0;
}

int main() {
#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd if (true)
  for(int i = 0 ; i < 100; i++) {


    fn4();
  }

#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd if (false)
  for(int i = 0 ; i < 100; i++) {


    fn5();
  }

#pragma omp target
#pragma omp teams
#pragma omp distribute parallel for simd if (Arg)
  for(int i = 0 ; i < 100; i++) {


    fn6();
  }

  return tmain(Arg);
}

// NOTE(review): the source layout above must not shift — the autogenerated
// CHECK lines below match offload region symbols whose names encode the
// source line of each `#pragma omp target` (e.g. _l43, _l48, _l81, _l90,
// _l99). Re-run utils/update_cc_test_checks.py after any source change.

// call void [[T_OUTLINE_FUN_3:@.+]](


#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z9gtid_testv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 100)
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1)
// CHECK1-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0
// CHECK1-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43() #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: call void
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 134 // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 135 // CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 136 // CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 137 // CHECK1: omp_offload.failed2: 138 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48() #[[ATTR2]] 139 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT3]] 140 // CHECK1: omp_offload.cont3: 141 // CHECK1-NEXT: ret void 142 // 143 // 144 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43 145 // CHECK1-SAME: () #[[ATTR1:[0-9]+]] { 146 // CHECK1-NEXT: entry: 147 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 148 // CHECK1-NEXT: ret void 149 // 150 // 151 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 
152 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 153 // CHECK1-NEXT: entry: 154 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 155 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 156 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 157 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 158 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 159 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 160 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 161 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 162 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 163 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 164 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 165 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 166 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 167 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 168 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 169 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 170 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 171 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 172 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 173 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 174 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 175 // CHECK1: cond.true: 176 // CHECK1-NEXT: br label [[COND_END:%.*]] 177 // CHECK1: cond.false: 178 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 179 // CHECK1-NEXT: br label [[COND_END]] 180 // CHECK1: 
cond.end: 181 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 182 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 183 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 184 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 185 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 186 // CHECK1: omp.inner.for.cond: 187 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 188 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11 189 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 190 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 191 // CHECK1: omp.inner.for.body: 192 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !11 193 // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 194 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11 195 // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 196 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !11 197 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 198 // CHECK1: omp.inner.for.inc: 199 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 200 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !11 201 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 202 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 203 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] 204 // CHECK1: omp.inner.for.end: 205 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 206 // CHECK1: omp.loop.exit: 207 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 208 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 209 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 210 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 211 // CHECK1: .omp.final.then: 212 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 213 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 214 // CHECK1: .omp.final.done: 215 // CHECK1-NEXT: ret void 216 // 217 // 218 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1 219 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 220 // CHECK1-NEXT: entry: 221 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 222 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 223 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 224 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 225 // CHECK1-NEXT: 
[[DOTOMP_IV:%.*]] = alloca i32, align 4 226 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 227 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 228 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 229 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 230 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 231 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 232 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 233 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 234 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 235 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 236 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 237 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 238 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 239 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 240 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 241 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 242 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 243 // CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 244 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 245 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 246 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 247 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 248 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 249 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 250 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 251 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], 
label [[COND_FALSE:%.*]] 252 // CHECK1: cond.true: 253 // CHECK1-NEXT: br label [[COND_END:%.*]] 254 // CHECK1: cond.false: 255 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 256 // CHECK1-NEXT: br label [[COND_END]] 257 // CHECK1: cond.end: 258 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 259 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 260 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 261 // CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 262 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 263 // CHECK1: omp.inner.for.cond: 264 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 265 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15 266 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 267 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 268 // CHECK1: omp.inner.for.body: 269 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 270 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 271 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 272 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !15 273 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 274 // CHECK1: omp.body.continue: 275 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 276 // CHECK1: omp.inner.for.inc: 277 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 278 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 279 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 280 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 281 // CHECK1: omp.inner.for.end: 282 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 283 // CHECK1: omp.loop.exit: 284 // CHECK1-NEXT: 
call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 285 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 286 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 287 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 288 // CHECK1: .omp.final.then: 289 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 290 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 291 // CHECK1: .omp.final.done: 292 // CHECK1-NEXT: ret void 293 // 294 // 295 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48 296 // CHECK1-SAME: () #[[ATTR1]] { 297 // CHECK1-NEXT: entry: 298 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*)) 299 // CHECK1-NEXT: ret void 300 // 301 // 302 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..2 303 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 304 // CHECK1-NEXT: entry: 305 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 306 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 307 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 308 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 309 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 310 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 311 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 312 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 313 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 314 // CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 315 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 316 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 317 // 
CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 318 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 319 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 320 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 321 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 322 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 323 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 324 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 325 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 326 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 327 // CHECK1: cond.true: 328 // CHECK1-NEXT: br label [[COND_END:%.*]] 329 // CHECK1: cond.false: 330 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 331 // CHECK1-NEXT: br label [[COND_END]] 332 // CHECK1: cond.end: 333 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 334 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 335 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 336 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 337 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 338 // CHECK1: omp.inner.for.cond: 339 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 340 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20 341 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 342 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 343 // CHECK1: omp.inner.for.body: 344 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, 
!llvm.access.group !20 345 // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 346 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20 347 // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 348 // CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !20 349 // CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !20 350 // CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !20 351 // CHECK1-NEXT: call void @.omp_outlined..3(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !20 352 // CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !20 353 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 354 // CHECK1: omp.inner.for.inc: 355 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 356 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !20 357 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 358 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 359 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] 360 // CHECK1: omp.inner.for.end: 361 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 362 // CHECK1: omp.loop.exit: 363 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 364 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 365 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 366 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 367 // CHECK1: .omp.final.then: 368 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 369 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 370 // CHECK1: 
.omp.final.done: 371 // CHECK1-NEXT: ret void 372 // 373 // 374 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..3 375 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 376 // CHECK1-NEXT: entry: 377 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 378 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 379 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 380 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 381 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 382 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 383 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 384 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 385 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 386 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 387 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 388 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 389 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 390 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 391 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 392 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 393 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 394 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 395 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 396 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 397 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 398 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 399 // CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 400 // CHECK1-NEXT: store i32 1, i32* 
[[DOTOMP_STRIDE]], align 4 401 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 402 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 403 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 404 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 405 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 406 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 407 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 408 // CHECK1: cond.true: 409 // CHECK1-NEXT: br label [[COND_END:%.*]] 410 // CHECK1: cond.false: 411 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 412 // CHECK1-NEXT: br label [[COND_END]] 413 // CHECK1: cond.end: 414 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 415 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 416 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 417 // CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 418 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 419 // CHECK1: omp.inner.for.cond: 420 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 421 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23 422 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 423 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 424 // CHECK1: omp.inner.for.body: 425 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 426 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 427 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 428 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group 
!23 429 // CHECK1-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !23 430 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 431 // CHECK1: omp.body.continue: 432 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 433 // CHECK1: omp.inner.for.inc: 434 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 435 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 436 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 437 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] 438 // CHECK1: omp.inner.for.end: 439 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 440 // CHECK1: omp.loop.exit: 441 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 442 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 443 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 444 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 445 // CHECK1: .omp.final.then: 446 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 447 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 448 // CHECK1: .omp.final.done: 449 // CHECK1-NEXT: ret void 450 // 451 // 452 // CHECK1-LABEL: define {{[^@]+}}@main 453 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] { 454 // CHECK1-NEXT: entry: 455 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 456 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 457 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 458 // CHECK1-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 459 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 460 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 461 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 462 // CHECK1-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 463 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4 464 // CHECK1-NEXT: call void 
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 465 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 466 // CHECK1-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 467 // CHECK1-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 468 // CHECK1: omp_offload.failed: 469 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81() #[[ATTR2]] 470 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 471 // CHECK1: omp_offload.cont: 472 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 473 // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 474 // CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 475 // CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 476 // CHECK1: omp_offload.failed2: 477 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90() #[[ATTR2]] 478 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT3]] 479 // CHECK1: omp_offload.cont3: 480 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* @Arg, align 4 481 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 482 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 483 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 484 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 485 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 486 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 487 // 
CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 488 // CHECK1-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 489 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 490 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 491 // CHECK1-NEXT: store i8* null, i8** [[TMP10]], align 8 492 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 493 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 494 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 495 // CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 496 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 497 // CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 498 // CHECK1: omp_offload.failed5: 499 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99(i64 [[TMP5]]) #[[ATTR2]] 500 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT6]] 501 // CHECK1: omp_offload.cont6: 502 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 503 // CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 504 // CHECK1-NEXT: ret i32 [[CALL]] 505 // 506 // 507 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 508 // CHECK1-SAME: () #[[ATTR1]] { 509 // CHECK1-NEXT: entry: 510 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, 
i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 511 // CHECK1-NEXT: ret void 512 // 513 // 514 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4 515 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 516 // CHECK1-NEXT: entry: 517 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 518 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 519 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 520 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 521 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 522 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 523 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 524 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 525 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 526 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 527 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 528 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 529 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 530 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 531 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 532 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 533 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 534 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 535 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 536 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 537 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], 
label [[COND_FALSE:%.*]] 538 // CHECK1: cond.true: 539 // CHECK1-NEXT: br label [[COND_END:%.*]] 540 // CHECK1: cond.false: 541 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 542 // CHECK1-NEXT: br label [[COND_END]] 543 // CHECK1: cond.end: 544 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 545 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 546 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 547 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 548 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 549 // CHECK1: omp.inner.for.cond: 550 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 551 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26 552 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 553 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 554 // CHECK1: omp.inner.for.body: 555 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26 556 // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 557 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26 558 // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 559 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !26 560 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 561 // CHECK1: omp.inner.for.inc: 562 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 563 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26 564 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 565 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 566 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] 567 // CHECK1: omp.inner.for.end: 568 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 569 // CHECK1: omp.loop.exit: 570 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 571 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 572 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 573 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 574 // CHECK1: .omp.final.then: 575 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 576 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 577 // CHECK1: .omp.final.done: 578 // CHECK1-NEXT: ret void 579 // 580 // 581 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..5 582 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 583 // CHECK1-NEXT: entry: 584 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 585 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 586 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 587 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 588 // CHECK1-NEXT: 
[[DOTOMP_IV:%.*]] = alloca i32, align 4 589 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 590 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 591 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 592 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 593 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 594 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 595 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 596 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 597 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 598 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 599 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 600 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 601 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 602 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 603 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 604 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 605 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 606 // CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 607 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 608 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 609 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 610 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 611 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 612 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 613 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 614 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label 
[[COND_FALSE:%.*]] 615 // CHECK1: cond.true: 616 // CHECK1-NEXT: br label [[COND_END:%.*]] 617 // CHECK1: cond.false: 618 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 619 // CHECK1-NEXT: br label [[COND_END]] 620 // CHECK1: cond.end: 621 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 622 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 623 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 624 // CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 625 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 626 // CHECK1: omp.inner.for.cond: 627 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 628 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29 629 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 630 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 631 // CHECK1: omp.inner.for.body: 632 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 633 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 634 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 635 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !29 636 // CHECK1-NEXT: call void @_Z3fn4v(), !llvm.access.group !29 637 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 638 // CHECK1: omp.body.continue: 639 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 640 // CHECK1: omp.inner.for.inc: 641 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 642 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 643 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 644 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP30:![0-9]+]] 645 // CHECK1: omp.inner.for.end: 646 // CHECK1-NEXT: br label 
[[OMP_LOOP_EXIT:%.*]] 647 // CHECK1: omp.loop.exit: 648 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 649 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 650 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 651 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 652 // CHECK1: .omp.final.then: 653 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 654 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 655 // CHECK1: .omp.final.done: 656 // CHECK1-NEXT: ret void 657 // 658 // 659 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90 660 // CHECK1-SAME: () #[[ATTR1]] { 661 // CHECK1-NEXT: entry: 662 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..6 to void (i32*, i32*, ...)*)) 663 // CHECK1-NEXT: ret void 664 // 665 // 666 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..6 667 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 668 // CHECK1-NEXT: entry: 669 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 670 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 671 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 672 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 673 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 674 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 675 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 676 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 677 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 678 // CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 679 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 680 // CHECK1-NEXT: store i32* 
[[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 681 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 682 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 683 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 684 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 685 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 686 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 687 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 688 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 689 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 690 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 691 // CHECK1: cond.true: 692 // CHECK1-NEXT: br label [[COND_END:%.*]] 693 // CHECK1: cond.false: 694 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 695 // CHECK1-NEXT: br label [[COND_END]] 696 // CHECK1: cond.end: 697 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 698 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 699 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 700 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 701 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 702 // CHECK1: omp.inner.for.cond: 703 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32 704 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32 705 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 706 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 707 // CHECK1: omp.inner.for.body: 708 // CHECK1-NEXT: 
[[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32 709 // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 710 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32 711 // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 712 // CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !32 713 // CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !32 714 // CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !32 715 // CHECK1-NEXT: call void @.omp_outlined..7(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !32 716 // CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !32 717 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 718 // CHECK1: omp.inner.for.inc: 719 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32 720 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32 721 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 722 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32 723 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] 724 // CHECK1: omp.inner.for.end: 725 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 726 // CHECK1: omp.loop.exit: 727 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 728 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 729 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 730 // CHECK1-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 731 // CHECK1: .omp.final.then: 732 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 733 // 
CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 734 // CHECK1: .omp.final.done: 735 // CHECK1-NEXT: ret void 736 // 737 // 738 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7 739 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 740 // CHECK1-NEXT: entry: 741 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 742 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 743 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 744 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 745 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 746 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 747 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 748 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 749 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 750 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 751 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 752 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 753 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 754 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 755 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 756 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 757 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 758 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 759 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 760 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 761 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 762 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 763 // CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], 
align 4 764 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 765 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 766 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 767 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 768 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 769 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 770 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 771 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 772 // CHECK1: cond.true: 773 // CHECK1-NEXT: br label [[COND_END:%.*]] 774 // CHECK1: cond.false: 775 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 776 // CHECK1-NEXT: br label [[COND_END]] 777 // CHECK1: cond.end: 778 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 779 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 780 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 781 // CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 782 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 783 // CHECK1: omp.inner.for.cond: 784 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 785 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35 786 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 787 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 788 // CHECK1: omp.inner.for.body: 789 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 790 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 791 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 792 // CHECK1-NEXT: store i32 
[[ADD]], i32* [[I]], align 4, !llvm.access.group !35 793 // CHECK1-NEXT: call void @_Z3fn5v(), !llvm.access.group !35 794 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 795 // CHECK1: omp.body.continue: 796 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 797 // CHECK1: omp.inner.for.inc: 798 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 799 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 800 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 801 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] 802 // CHECK1: omp.inner.for.end: 803 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 804 // CHECK1: omp.loop.exit: 805 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 806 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 807 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 808 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 809 // CHECK1: .omp.final.then: 810 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 811 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 812 // CHECK1: .omp.final.done: 813 // CHECK1-NEXT: ret void 814 // 815 // 816 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99 817 // CHECK1-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 818 // CHECK1-NEXT: entry: 819 // CHECK1-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 820 // CHECK1-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 821 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 822 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]]) 823 // CHECK1-NEXT: ret void 824 // 825 // 826 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..8 827 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 828 // CHECK1-NEXT: entry: 829 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 830 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 831 // CHECK1-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 832 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 833 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 834 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 835 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 836 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 837 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 838 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 839 // CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 840 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 841 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 842 // CHECK1-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 843 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 844 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 845 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 846 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 847 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 848 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 849 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 850 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* 
@[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 851 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 852 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 853 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 854 // CHECK1: cond.true: 855 // CHECK1-NEXT: br label [[COND_END:%.*]] 856 // CHECK1: cond.false: 857 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 858 // CHECK1-NEXT: br label [[COND_END]] 859 // CHECK1: cond.end: 860 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 861 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 862 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 863 // CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 864 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 865 // CHECK1: omp.inner.for.cond: 866 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38 867 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38 868 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 869 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 870 // CHECK1: omp.inner.for.body: 871 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38 872 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 873 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38 874 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 875 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !38 876 // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 877 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 878 
// CHECK1: omp_if.then: 879 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !38 880 // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] 881 // CHECK1: omp_if.else: 882 // CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !38 883 // CHECK1-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !38 884 // CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !38 885 // CHECK1-NEXT: call void @.omp_outlined..9(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !38 886 // CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !38 887 // CHECK1-NEXT: br label [[OMP_IF_END]] 888 // CHECK1: omp_if.end: 889 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 890 // CHECK1: omp.inner.for.inc: 891 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38 892 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38 893 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 894 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38 895 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] 896 // CHECK1: omp.inner.for.end: 897 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 898 // CHECK1: omp.loop.exit: 899 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 900 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 901 // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 902 // CHECK1-NEXT: br i1 
[[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 903 // CHECK1: .omp.final.then: 904 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 905 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 906 // CHECK1: .omp.final.done: 907 // CHECK1-NEXT: ret void 908 // 909 // 910 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..9 911 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 912 // CHECK1-NEXT: entry: 913 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 914 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 915 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 916 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 917 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 918 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 919 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 920 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 921 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 922 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 923 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 924 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 925 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 926 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 927 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 928 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 929 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 930 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 931 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 932 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 933 // CHECK1-NEXT: 
[[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 934 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 935 // CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 936 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 937 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 938 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 939 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 940 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 941 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 942 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 943 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 944 // CHECK1: cond.true: 945 // CHECK1-NEXT: br label [[COND_END:%.*]] 946 // CHECK1: cond.false: 947 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 948 // CHECK1-NEXT: br label [[COND_END]] 949 // CHECK1: cond.end: 950 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 951 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 952 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 953 // CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 954 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 955 // CHECK1: omp.inner.for.cond: 956 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 957 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41 958 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 959 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 960 // CHECK1: omp.inner.for.body: 961 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, 
!llvm.access.group !41 962 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 963 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 964 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !41 965 // CHECK1-NEXT: call void @_Z3fn6v(), !llvm.access.group !41 966 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 967 // CHECK1: omp.body.continue: 968 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 969 // CHECK1: omp.inner.for.inc: 970 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 971 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 972 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 973 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] 974 // CHECK1: omp.inner.for.end: 975 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 976 // CHECK1: omp.loop.exit: 977 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 978 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 979 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 980 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 981 // CHECK1: .omp.final.then: 982 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 983 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 984 // CHECK1: .omp.final.done: 985 // CHECK1-NEXT: ret void 986 // 987 // 988 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 989 // CHECK1-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 990 // CHECK1-NEXT: entry: 991 // CHECK1-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 992 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 993 // CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 994 // CHECK1-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 995 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 996 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 997 // 
CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 998 // CHECK1-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 999 // CHECK1-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 1000 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 1001 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 1002 // CHECK1-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 1003 // CHECK1-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1004 // CHECK1: omp_offload.failed: 1005 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59() #[[ATTR2]] 1006 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 1007 // CHECK1: omp_offload.cont: 1008 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 1009 // CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 1010 // CHECK1-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 1011 // CHECK1-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 1012 // CHECK1: omp_offload.failed2: 1013 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65() #[[ATTR2]] 1014 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT3]] 1015 // CHECK1: omp_offload.cont3: 1016 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 1017 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 1018 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 1019 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, 
i64* [[ARG_CASTED]], align 8 1020 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1021 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 1022 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 1023 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1024 // CHECK1-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 1025 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 1026 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 1027 // CHECK1-NEXT: store i8* null, i8** [[TMP10]], align 8 1028 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1029 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1030 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 1031 // CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 1032 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 1033 // CHECK1-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 1034 // CHECK1: omp_offload.failed5: 1035 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71(i64 [[TMP5]]) #[[ATTR2]] 1036 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT6]] 1037 // CHECK1: omp_offload.cont6: 1038 // CHECK1-NEXT: ret i32 0 1039 // 1040 // 1041 // CHECK1-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59 1042 // CHECK1-SAME: () #[[ATTR1]] { 1043 // CHECK1-NEXT: entry: 1044 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 1045 // CHECK1-NEXT: ret void 1046 // 1047 // 1048 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10 1049 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1050 // CHECK1-NEXT: entry: 1051 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1052 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1053 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1054 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 1055 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 1056 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 1057 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1058 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1059 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 1060 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1061 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1062 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 1063 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 1064 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1065 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1066 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1067 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 1068 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* 
[[DOTOMP_STRIDE]], i32 1, i32 1) 1069 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1070 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 1071 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1072 // CHECK1: cond.true: 1073 // CHECK1-NEXT: br label [[COND_END:%.*]] 1074 // CHECK1: cond.false: 1075 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1076 // CHECK1-NEXT: br label [[COND_END]] 1077 // CHECK1: cond.end: 1078 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 1079 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 1080 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1081 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 1082 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1083 // CHECK1: omp.inner.for.cond: 1084 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44 1085 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44 1086 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 1087 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1088 // CHECK1: omp.inner.for.body: 1089 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44 1090 // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 1091 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44 1092 // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 1093 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !44 1094 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1095 // CHECK1: omp.inner.for.inc: 1096 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44 1097 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44 1098 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 1099 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44 1100 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]] 1101 // CHECK1: omp.inner.for.end: 1102 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1103 // CHECK1: omp.loop.exit: 1104 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 1105 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1106 // CHECK1-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 1107 // CHECK1-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1108 // CHECK1: .omp.final.then: 1109 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 1110 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 1111 // CHECK1: .omp.final.done: 1112 // CHECK1-NEXT: ret void 1113 // 1114 // 1115 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11 1116 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 1117 // CHECK1-NEXT: entry: 1118 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1119 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1120 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 1121 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 1122 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1123 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 1124 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1125 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1126 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1127 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1128 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 1129 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1130 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1131 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 1132 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 1133 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1134 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 1135 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 1136 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 1137 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 1138 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 1139 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 1140 // CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 1141 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1142 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1143 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1144 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 1145 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1146 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1147 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 1148 // 
CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1149 // CHECK1: cond.true: 1150 // CHECK1-NEXT: br label [[COND_END:%.*]] 1151 // CHECK1: cond.false: 1152 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1153 // CHECK1-NEXT: br label [[COND_END]] 1154 // CHECK1: cond.end: 1155 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 1156 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1157 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1158 // CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 1159 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1160 // CHECK1: omp.inner.for.cond: 1161 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 1162 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47 1163 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 1164 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1165 // CHECK1: omp.inner.for.body: 1166 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 1167 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 1168 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1169 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !47 1170 // CHECK1-NEXT: call void @_Z3fn1v(), !llvm.access.group !47 1171 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1172 // CHECK1: omp.body.continue: 1173 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1174 // CHECK1: omp.inner.for.inc: 1175 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 1176 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 1177 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 1178 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP48:![0-9]+]] 1179 // CHECK1: omp.inner.for.end: 1180 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1181 // CHECK1: omp.loop.exit: 1182 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 1183 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1184 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 1185 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1186 // CHECK1: .omp.final.then: 1187 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 1188 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 1189 // CHECK1: .omp.final.done: 1190 // CHECK1-NEXT: ret void 1191 // 1192 // 1193 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65 1194 // CHECK1-SAME: () #[[ATTR1]] { 1195 // CHECK1-NEXT: entry: 1196 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..12 to void (i32*, i32*, ...)*)) 1197 // CHECK1-NEXT: ret void 1198 // 1199 // 1200 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..12 1201 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1202 // CHECK1-NEXT: entry: 1203 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1204 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1205 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1206 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 1207 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 1208 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 1209 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1210 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1211 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 1212 // CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, 
align 4 1213 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1214 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1215 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 1216 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 1217 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1218 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1219 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1220 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 1221 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1222 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1223 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 1224 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1225 // CHECK1: cond.true: 1226 // CHECK1-NEXT: br label [[COND_END:%.*]] 1227 // CHECK1: cond.false: 1228 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1229 // CHECK1-NEXT: br label [[COND_END]] 1230 // CHECK1: cond.end: 1231 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 1232 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 1233 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1234 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 1235 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1236 // CHECK1: omp.inner.for.cond: 1237 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 1238 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50 1239 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 1240 // 
CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1241 // CHECK1: omp.inner.for.body: 1242 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50 1243 // CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 1244 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50 1245 // CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 1246 // CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !50 1247 // CHECK1-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !50 1248 // CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !50 1249 // CHECK1-NEXT: call void @.omp_outlined..13(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !50 1250 // CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !50 1251 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1252 // CHECK1: omp.inner.for.inc: 1253 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 1254 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50 1255 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 1256 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 1257 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]] 1258 // CHECK1: omp.inner.for.end: 1259 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1260 // CHECK1: omp.loop.exit: 1261 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 1262 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1263 // CHECK1-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 1264 // CHECK1-NEXT: br 
i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1265 // CHECK1: .omp.final.then: 1266 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 1267 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 1268 // CHECK1: .omp.final.done: 1269 // CHECK1-NEXT: ret void 1270 // 1271 // 1272 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13 1273 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 1274 // CHECK1-NEXT: entry: 1275 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1276 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1277 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 1278 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 1279 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1280 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 1281 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1282 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1283 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1284 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1285 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 1286 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1287 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1288 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 1289 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 1290 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1291 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 1292 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 1293 // CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 1294 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* 
[[DOTPREVIOUS_UB__ADDR]], align 8 1295 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 1296 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 1297 // CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 1298 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1299 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1300 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1301 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 1302 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1303 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1304 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 1305 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1306 // CHECK1: cond.true: 1307 // CHECK1-NEXT: br label [[COND_END:%.*]] 1308 // CHECK1: cond.false: 1309 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1310 // CHECK1-NEXT: br label [[COND_END]] 1311 // CHECK1: cond.end: 1312 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 1313 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1314 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1315 // CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 1316 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1317 // CHECK1: omp.inner.for.cond: 1318 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 1319 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53 1320 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 1321 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1322 // CHECK1: 
omp.inner.for.body: 1323 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 1324 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 1325 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1326 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !53 1327 // CHECK1-NEXT: call void @_Z3fn2v(), !llvm.access.group !53 1328 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1329 // CHECK1: omp.body.continue: 1330 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1331 // CHECK1: omp.inner.for.inc: 1332 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 1333 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 1334 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 1335 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]] 1336 // CHECK1: omp.inner.for.end: 1337 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1338 // CHECK1: omp.loop.exit: 1339 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 1340 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1341 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 1342 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1343 // CHECK1: .omp.final.then: 1344 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 1345 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 1346 // CHECK1: .omp.final.done: 1347 // CHECK1-NEXT: ret void 1348 // 1349 // 1350 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71 1351 // CHECK1-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 1352 // CHECK1-NEXT: entry: 1353 // CHECK1-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 1354 // CHECK1-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 1355 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 1356 // CHECK1-NEXT: call void 
(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]]) 1357 // CHECK1-NEXT: ret void 1358 // 1359 // 1360 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14 1361 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 1362 // CHECK1-NEXT: entry: 1363 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1364 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1365 // CHECK1-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 1366 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1367 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 1368 // CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 1369 // CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 1370 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1371 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1372 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 1373 // CHECK1-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 1374 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1375 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1376 // CHECK1-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 1377 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 1378 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 1379 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 1380 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1381 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1382 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1383 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 
1384 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1385 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1386 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 1387 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1388 // CHECK1: cond.true: 1389 // CHECK1-NEXT: br label [[COND_END:%.*]] 1390 // CHECK1: cond.false: 1391 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1392 // CHECK1-NEXT: br label [[COND_END]] 1393 // CHECK1: cond.end: 1394 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 1395 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 1396 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1397 // CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 1398 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1399 // CHECK1: omp.inner.for.cond: 1400 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56 1401 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56 1402 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 1403 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1404 // CHECK1: omp.inner.for.body: 1405 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !56 1406 // CHECK1-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 1407 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56 1408 // CHECK1-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 1409 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !56 1410 // CHECK1-NEXT: [[TOBOOL:%.*]] = icmp ne i32 
[[TMP12]], 0 1411 // CHECK1-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 1412 // CHECK1: omp_if.then: 1413 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !56 1414 // CHECK1-NEXT: br label [[OMP_IF_END:%.*]] 1415 // CHECK1: omp_if.else: 1416 // CHECK1-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !56 1417 // CHECK1-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !56 1418 // CHECK1-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !56 1419 // CHECK1-NEXT: call void @.omp_outlined..15(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !56 1420 // CHECK1-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !56 1421 // CHECK1-NEXT: br label [[OMP_IF_END]] 1422 // CHECK1: omp_if.end: 1423 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1424 // CHECK1: omp.inner.for.inc: 1425 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56 1426 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !56 1427 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 1428 // CHECK1-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56 1429 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]] 1430 // CHECK1: omp.inner.for.end: 1431 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1432 // CHECK1: omp.loop.exit: 1433 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 1434 // CHECK1-NEXT: [[TMP16:%.*]] = 
load i32, i32* [[DOTOMP_IS_LAST]], align 4 1435 // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 1436 // CHECK1-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1437 // CHECK1: .omp.final.then: 1438 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 1439 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 1440 // CHECK1: .omp.final.done: 1441 // CHECK1-NEXT: ret void 1442 // 1443 // 1444 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..15 1445 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 1446 // CHECK1-NEXT: entry: 1447 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1448 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1449 // CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 1450 // CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 1451 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1452 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 1453 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1454 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1455 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1456 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1457 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 1458 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1459 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1460 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 1461 // CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 1462 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1463 // CHECK1-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 1464 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 1465 // 
CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 1466 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 1467 // CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 1468 // CHECK1-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 1469 // CHECK1-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 1470 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1471 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1472 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1473 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 1474 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1475 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1476 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 1477 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1478 // CHECK1: cond.true: 1479 // CHECK1-NEXT: br label [[COND_END:%.*]] 1480 // CHECK1: cond.false: 1481 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1482 // CHECK1-NEXT: br label [[COND_END]] 1483 // CHECK1: cond.end: 1484 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 1485 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1486 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1487 // CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 1488 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1489 // CHECK1: omp.inner.for.cond: 1490 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 1491 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !59 1492 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 1493 // CHECK1-NEXT: br i1 
[[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1494 // CHECK1: omp.inner.for.body: 1495 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 1496 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 1497 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1498 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !59 1499 // CHECK1-NEXT: call void @_Z3fn3v(), !llvm.access.group !59 1500 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1501 // CHECK1: omp.body.continue: 1502 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1503 // CHECK1: omp.inner.for.inc: 1504 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 1505 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 1506 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 1507 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]] 1508 // CHECK1: omp.inner.for.end: 1509 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1510 // CHECK1: omp.loop.exit: 1511 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 1512 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1513 // CHECK1-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 1514 // CHECK1-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1515 // CHECK1: .omp.final.then: 1516 // CHECK1-NEXT: store i32 100, i32* [[I]], align 4 1517 // CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]] 1518 // CHECK1: .omp.final.done: 1519 // CHECK1-NEXT: ret void 1520 // 1521 // 1522 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 1523 // CHECK1-SAME: () #[[ATTR5:[0-9]+]] { 1524 // CHECK1-NEXT: entry: 1525 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1) 1526 // CHECK1-NEXT: ret void 1527 // 1528 // 1529 // CHECK2-LABEL: define {{[^@]+}}@_Z9gtid_testv 1530 // CHECK2-SAME: 
() #[[ATTR0:[0-9]+]] { 1531 // CHECK2-NEXT: entry: 1532 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1533 // CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 1534 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 100) 1535 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 1536 // CHECK2-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 1537 // CHECK2-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1538 // CHECK2: omp_offload.failed: 1539 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43() #[[ATTR2:[0-9]+]] 1540 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 1541 // CHECK2: omp_offload.cont: 1542 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 1543 // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 1544 // CHECK2-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 1545 // CHECK2-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 1546 // CHECK2: omp_offload.failed2: 1547 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48() #[[ATTR2]] 1548 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT3]] 1549 // CHECK2: omp_offload.cont3: 1550 // CHECK2-NEXT: ret void 1551 // 1552 // 1553 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43 1554 // CHECK2-SAME: () #[[ATTR1:[0-9]+]] { 1555 // CHECK2-NEXT: entry: 1556 // CHECK2-NEXT: call void (%struct.ident_t*, i32, 
void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 1557 // CHECK2-NEXT: ret void 1558 // 1559 // 1560 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined. 1561 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1562 // CHECK2-NEXT: entry: 1563 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1564 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1565 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1566 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1567 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 1568 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 1569 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1570 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1571 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1572 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1573 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1574 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 1575 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 1576 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1577 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1578 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1579 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 1580 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1581 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1582 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 1583 // 
CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1584 // CHECK2: cond.true: 1585 // CHECK2-NEXT: br label [[COND_END:%.*]] 1586 // CHECK2: cond.false: 1587 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1588 // CHECK2-NEXT: br label [[COND_END]] 1589 // CHECK2: cond.end: 1590 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 1591 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 1592 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1593 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 1594 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1595 // CHECK2: omp.inner.for.cond: 1596 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 1597 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11 1598 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 1599 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1600 // CHECK2: omp.inner.for.body: 1601 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !11 1602 // CHECK2-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 1603 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11 1604 // CHECK2-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 1605 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !11 1606 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1607 // CHECK2: omp.inner.for.inc: 1608 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 1609 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !11 1610 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 1611 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 1612 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] 1613 // CHECK2: omp.inner.for.end: 1614 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1615 // CHECK2: omp.loop.exit: 1616 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 1617 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1618 // CHECK2-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 1619 // CHECK2-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1620 // CHECK2: .omp.final.then: 1621 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 1622 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 1623 // CHECK2: .omp.final.done: 1624 // CHECK2-NEXT: ret void 1625 // 1626 // 1627 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1 1628 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 1629 // CHECK2-NEXT: entry: 1630 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1631 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1632 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 1633 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 1634 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1635 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1636 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1637 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1638 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1639 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1640 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1641 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1642 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1643 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 1644 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 1645 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1646 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 1647 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 1648 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 1649 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 1650 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 1651 // CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 1652 // CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 1653 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1654 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1655 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1656 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 1657 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1658 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1659 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 1660 // 
CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1661 // CHECK2: cond.true: 1662 // CHECK2-NEXT: br label [[COND_END:%.*]] 1663 // CHECK2: cond.false: 1664 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1665 // CHECK2-NEXT: br label [[COND_END]] 1666 // CHECK2: cond.end: 1667 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 1668 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1669 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1670 // CHECK2-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 1671 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1672 // CHECK2: omp.inner.for.cond: 1673 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 1674 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15 1675 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 1676 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1677 // CHECK2: omp.inner.for.body: 1678 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 1679 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 1680 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1681 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !15 1682 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1683 // CHECK2: omp.body.continue: 1684 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1685 // CHECK2: omp.inner.for.inc: 1686 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 1687 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 1688 // CHECK2-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 1689 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 1690 // CHECK2: omp.inner.for.end: 1691 // 
CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1692 // CHECK2: omp.loop.exit: 1693 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 1694 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1695 // CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 1696 // CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1697 // CHECK2: .omp.final.then: 1698 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 1699 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 1700 // CHECK2: .omp.final.done: 1701 // CHECK2-NEXT: ret void 1702 // 1703 // 1704 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48 1705 // CHECK2-SAME: () #[[ATTR1]] { 1706 // CHECK2-NEXT: entry: 1707 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*)) 1708 // CHECK2-NEXT: ret void 1709 // 1710 // 1711 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..2 1712 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1713 // CHECK2-NEXT: entry: 1714 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1715 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1716 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1717 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1718 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 1719 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 1720 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1721 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1722 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1723 // CHECK2-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 1724 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** 
[[DOTGLOBAL_TID__ADDR]], align 8 1725 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1726 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 1727 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 1728 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1729 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1730 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1731 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 1732 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1733 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1734 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 1735 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1736 // CHECK2: cond.true: 1737 // CHECK2-NEXT: br label [[COND_END:%.*]] 1738 // CHECK2: cond.false: 1739 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1740 // CHECK2-NEXT: br label [[COND_END]] 1741 // CHECK2: cond.end: 1742 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 1743 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 1744 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1745 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 1746 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1747 // CHECK2: omp.inner.for.cond: 1748 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 1749 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20 1750 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 1751 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label 
[[OMP_INNER_FOR_END:%.*]] 1752 // CHECK2: omp.inner.for.body: 1753 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !20 1754 // CHECK2-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 1755 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20 1756 // CHECK2-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 1757 // CHECK2-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !20 1758 // CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !20 1759 // CHECK2-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !20 1760 // CHECK2-NEXT: call void @.omp_outlined..3(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !20 1761 // CHECK2-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !20 1762 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1763 // CHECK2: omp.inner.for.inc: 1764 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 1765 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !20 1766 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 1767 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 1768 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] 1769 // CHECK2: omp.inner.for.end: 1770 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1771 // CHECK2: omp.loop.exit: 1772 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 1773 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1774 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 1775 // CHECK2-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label 
[[DOTOMP_FINAL_DONE:%.*]] 1776 // CHECK2: .omp.final.then: 1777 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 1778 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 1779 // CHECK2: .omp.final.done: 1780 // CHECK2-NEXT: ret void 1781 // 1782 // 1783 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3 1784 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 1785 // CHECK2-NEXT: entry: 1786 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1787 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1788 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 1789 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 1790 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1791 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1792 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1793 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1794 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1795 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1796 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1797 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1798 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1799 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 1800 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 1801 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1802 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 1803 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 1804 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 1805 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 1806 // CHECK2-NEXT: [[CONV1:%.*]] = trunc 
i64 [[TMP1]] to i32 1807 // CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 1808 // CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 1809 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1810 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1811 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1812 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 1813 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1814 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1815 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 1816 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1817 // CHECK2: cond.true: 1818 // CHECK2-NEXT: br label [[COND_END:%.*]] 1819 // CHECK2: cond.false: 1820 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1821 // CHECK2-NEXT: br label [[COND_END]] 1822 // CHECK2: cond.end: 1823 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 1824 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1825 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1826 // CHECK2-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 1827 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1828 // CHECK2: omp.inner.for.cond: 1829 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 1830 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23 1831 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 1832 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1833 // CHECK2: omp.inner.for.body: 1834 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 
4, !llvm.access.group !23 1835 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 1836 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1837 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !23 1838 // CHECK2-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !23 1839 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1840 // CHECK2: omp.body.continue: 1841 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1842 // CHECK2: omp.inner.for.inc: 1843 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 1844 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 1845 // CHECK2-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 1846 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] 1847 // CHECK2: omp.inner.for.end: 1848 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1849 // CHECK2: omp.loop.exit: 1850 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 1851 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1852 // CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 1853 // CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1854 // CHECK2: .omp.final.then: 1855 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 1856 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 1857 // CHECK2: .omp.final.done: 1858 // CHECK2-NEXT: ret void 1859 // 1860 // 1861 // CHECK2-LABEL: define {{[^@]+}}@main 1862 // CHECK2-SAME: () #[[ATTR3:[0-9]+]] { 1863 // CHECK2-NEXT: entry: 1864 // CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 1865 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1866 // CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 1867 // CHECK2-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 1868 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 1869 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 
1870 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 1871 // CHECK2-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 1872 // CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4 1873 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 1874 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 1875 // CHECK2-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 1876 // CHECK2-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1877 // CHECK2: omp_offload.failed: 1878 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81() #[[ATTR2]] 1879 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 1880 // CHECK2: omp_offload.cont: 1881 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 1882 // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 1883 // CHECK2-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 1884 // CHECK2-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 1885 // CHECK2: omp_offload.failed2: 1886 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90() #[[ATTR2]] 1887 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT3]] 1888 // CHECK2: omp_offload.cont3: 1889 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* @Arg, align 4 1890 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 1891 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 1892 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 1893 // 
CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1894 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 1895 // CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 1896 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1897 // CHECK2-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 1898 // CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 1899 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 1900 // CHECK2-NEXT: store i8* null, i8** [[TMP10]], align 8 1901 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1902 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1903 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 1904 // CHECK2-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 1905 // CHECK2-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 1906 // CHECK2-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 1907 // CHECK2: omp_offload.failed5: 1908 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99(i64 [[TMP5]]) #[[ATTR2]] 1909 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT6]] 1910 // CHECK2: omp_offload.cont6: 1911 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 1912 // CHECK2-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 1913 
// CHECK2-NEXT: ret i32 [[CALL]] 1914 // 1915 // 1916 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 1917 // CHECK2-SAME: () #[[ATTR1]] { 1918 // CHECK2-NEXT: entry: 1919 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 1920 // CHECK2-NEXT: ret void 1921 // 1922 // 1923 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4 1924 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1925 // CHECK2-NEXT: entry: 1926 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1927 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1928 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1929 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1930 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 1931 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 1932 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1933 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1934 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1935 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1936 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1937 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 1938 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 1939 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1940 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1941 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1942 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 1943 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* 
[[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1944 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1945 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 1946 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1947 // CHECK2: cond.true: 1948 // CHECK2-NEXT: br label [[COND_END:%.*]] 1949 // CHECK2: cond.false: 1950 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1951 // CHECK2-NEXT: br label [[COND_END]] 1952 // CHECK2: cond.end: 1953 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 1954 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 1955 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1956 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 1957 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1958 // CHECK2: omp.inner.for.cond: 1959 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 1960 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26 1961 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 1962 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1963 // CHECK2: omp.inner.for.body: 1964 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26 1965 // CHECK2-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 1966 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26 1967 // CHECK2-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 1968 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !26 1969 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1970 // CHECK2: omp.inner.for.inc: 1971 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 1972 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26 1973 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 1974 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 1975 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] 1976 // CHECK2: omp.inner.for.end: 1977 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1978 // CHECK2: omp.loop.exit: 1979 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 1980 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 1981 // CHECK2-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 1982 // CHECK2-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 1983 // CHECK2: .omp.final.then: 1984 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 1985 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 1986 // CHECK2: .omp.final.done: 1987 // CHECK2-NEXT: ret void 1988 // 1989 // 1990 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..5 1991 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 1992 // CHECK2-NEXT: entry: 1993 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1994 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1995 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 1996 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 1997 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1998 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1999 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2000 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2001 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2002 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2003 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2004 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2005 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2006 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2007 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 2008 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2009 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 2010 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2011 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 2012 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 2013 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 2014 // CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 2015 // CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 2016 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2017 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2018 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2019 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 2020 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2021 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2022 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 2023 // 
CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2024 // CHECK2: cond.true: 2025 // CHECK2-NEXT: br label [[COND_END:%.*]] 2026 // CHECK2: cond.false: 2027 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2028 // CHECK2-NEXT: br label [[COND_END]] 2029 // CHECK2: cond.end: 2030 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 2031 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2032 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2033 // CHECK2-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 2034 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2035 // CHECK2: omp.inner.for.cond: 2036 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 2037 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29 2038 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 2039 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2040 // CHECK2: omp.inner.for.body: 2041 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 2042 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 2043 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2044 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !29 2045 // CHECK2-NEXT: call void @_Z3fn4v(), !llvm.access.group !29 2046 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2047 // CHECK2: omp.body.continue: 2048 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2049 // CHECK2: omp.inner.for.inc: 2050 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 2051 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 2052 // CHECK2-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 2053 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP30:![0-9]+]] 2054 // CHECK2: omp.inner.for.end: 2055 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2056 // CHECK2: omp.loop.exit: 2057 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 2058 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2059 // CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 2060 // CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2061 // CHECK2: .omp.final.then: 2062 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 2063 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2064 // CHECK2: .omp.final.done: 2065 // CHECK2-NEXT: ret void 2066 // 2067 // 2068 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90 2069 // CHECK2-SAME: () #[[ATTR1]] { 2070 // CHECK2-NEXT: entry: 2071 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..6 to void (i32*, i32*, ...)*)) 2072 // CHECK2-NEXT: ret void 2073 // 2074 // 2075 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..6 2076 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 2077 // CHECK2-NEXT: entry: 2078 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2079 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2080 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2081 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2082 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 2083 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 2084 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2085 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2086 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2087 // CHECK2-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 2088 // 
CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2089 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2090 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 2091 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 2092 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2093 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2094 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2095 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 2096 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2097 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2098 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 2099 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2100 // CHECK2: cond.true: 2101 // CHECK2-NEXT: br label [[COND_END:%.*]] 2102 // CHECK2: cond.false: 2103 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2104 // CHECK2-NEXT: br label [[COND_END]] 2105 // CHECK2: cond.end: 2106 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 2107 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 2108 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 2109 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 2110 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2111 // CHECK2: omp.inner.for.cond: 2112 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32 2113 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32 2114 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 2115 // CHECK2-NEXT: br i1 
[[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2116 // CHECK2: omp.inner.for.body: 2117 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !32 2118 // CHECK2-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 2119 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !32 2120 // CHECK2-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 2121 // CHECK2-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !32 2122 // CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !32 2123 // CHECK2-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !32 2124 // CHECK2-NEXT: call void @.omp_outlined..7(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !32 2125 // CHECK2-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !32 2126 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2127 // CHECK2: omp.inner.for.inc: 2128 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32 2129 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !32 2130 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 2131 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !32 2132 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP33:![0-9]+]] 2133 // CHECK2: omp.inner.for.end: 2134 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2135 // CHECK2: omp.loop.exit: 2136 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 2137 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2138 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 2139 // CHECK2-NEXT: br i1 [[TMP15]], label 
[[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2140 // CHECK2: .omp.final.then: 2141 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 2142 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2143 // CHECK2: .omp.final.done: 2144 // CHECK2-NEXT: ret void 2145 // 2146 // 2147 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..7 2148 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 2149 // CHECK2-NEXT: entry: 2150 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2151 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2152 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 2153 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 2154 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2155 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2156 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2157 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2158 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2159 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2160 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2161 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2162 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2163 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2164 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 2165 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2166 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 2167 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2168 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 2169 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 2170 // 
CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 2171 // CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 2172 // CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 2173 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2174 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2175 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2176 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 2177 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2178 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2179 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 2180 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2181 // CHECK2: cond.true: 2182 // CHECK2-NEXT: br label [[COND_END:%.*]] 2183 // CHECK2: cond.false: 2184 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2185 // CHECK2-NEXT: br label [[COND_END]] 2186 // CHECK2: cond.end: 2187 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 2188 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2189 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2190 // CHECK2-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 2191 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2192 // CHECK2: omp.inner.for.cond: 2193 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 2194 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !35 2195 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 2196 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2197 // CHECK2: omp.inner.for.body: 2198 // CHECK2-NEXT: [[TMP9:%.*]] = 
load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 2199 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 2200 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2201 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !35 2202 // CHECK2-NEXT: call void @_Z3fn5v(), !llvm.access.group !35 2203 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2204 // CHECK2: omp.body.continue: 2205 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2206 // CHECK2: omp.inner.for.inc: 2207 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 2208 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 2209 // CHECK2-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 2210 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] 2211 // CHECK2: omp.inner.for.end: 2212 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2213 // CHECK2: omp.loop.exit: 2214 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 2215 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2216 // CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 2217 // CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2218 // CHECK2: .omp.final.then: 2219 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 2220 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2221 // CHECK2: .omp.final.done: 2222 // CHECK2-NEXT: ret void 2223 // 2224 // 2225 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99 2226 // CHECK2-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 2227 // CHECK2-NEXT: entry: 2228 // CHECK2-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 2229 // CHECK2-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 2230 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 2231 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]]) 2232 // CHECK2-NEXT: ret void 2233 // 2234 // 2235 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..8 2236 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 2237 // CHECK2-NEXT: entry: 2238 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2239 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2240 // CHECK2-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 2241 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2242 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2243 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 2244 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 2245 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2246 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2247 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2248 // CHECK2-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 2249 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2250 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2251 // CHECK2-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 2252 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 2253 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 2254 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 2255 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2256 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2257 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2258 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 2259 // CHECK2-NEXT: call void 
@__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2260 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2261 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 2262 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2263 // CHECK2: cond.true: 2264 // CHECK2-NEXT: br label [[COND_END:%.*]] 2265 // CHECK2: cond.false: 2266 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2267 // CHECK2-NEXT: br label [[COND_END]] 2268 // CHECK2: cond.end: 2269 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 2270 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 2271 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 2272 // CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 2273 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2274 // CHECK2: omp.inner.for.cond: 2275 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38 2276 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38 2277 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 2278 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2279 // CHECK2: omp.inner.for.body: 2280 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !38 2281 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 2282 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !38 2283 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 2284 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !38 2285 // CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 2286 // CHECK2-NEXT: br i1 
[[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 2287 // CHECK2: omp_if.then: 2288 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !38 2289 // CHECK2-NEXT: br label [[OMP_IF_END:%.*]] 2290 // CHECK2: omp_if.else: 2291 // CHECK2-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !38 2292 // CHECK2-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !38 2293 // CHECK2-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !38 2294 // CHECK2-NEXT: call void @.omp_outlined..9(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !38 2295 // CHECK2-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !38 2296 // CHECK2-NEXT: br label [[OMP_IF_END]] 2297 // CHECK2: omp_if.end: 2298 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2299 // CHECK2: omp.inner.for.inc: 2300 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38 2301 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !38 2302 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 2303 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !38 2304 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP39:![0-9]+]] 2305 // CHECK2: omp.inner.for.end: 2306 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2307 // CHECK2: omp.loop.exit: 2308 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 2309 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 
2310 // CHECK2-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 2311 // CHECK2-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2312 // CHECK2: .omp.final.then: 2313 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 2314 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2315 // CHECK2: .omp.final.done: 2316 // CHECK2-NEXT: ret void 2317 // 2318 // 2319 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9 2320 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 2321 // CHECK2-NEXT: entry: 2322 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2323 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2324 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 2325 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 2326 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2327 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2328 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2329 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2330 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2331 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2332 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2333 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2334 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2335 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2336 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 2337 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2338 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 2339 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2340 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 
[[TMP0]] to i32 2341 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 2342 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 2343 // CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 2344 // CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 2345 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2346 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2347 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2348 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 2349 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2350 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2351 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 2352 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2353 // CHECK2: cond.true: 2354 // CHECK2-NEXT: br label [[COND_END:%.*]] 2355 // CHECK2: cond.false: 2356 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2357 // CHECK2-NEXT: br label [[COND_END]] 2358 // CHECK2: cond.end: 2359 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 2360 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2361 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2362 // CHECK2-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 2363 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2364 // CHECK2: omp.inner.for.cond: 2365 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 2366 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !41 2367 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 2368 // CHECK2-NEXT: br i1 [[CMP2]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2369 // CHECK2: omp.inner.for.body: 2370 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 2371 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 2372 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2373 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !41 2374 // CHECK2-NEXT: call void @_Z3fn6v(), !llvm.access.group !41 2375 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2376 // CHECK2: omp.body.continue: 2377 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2378 // CHECK2: omp.inner.for.inc: 2379 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 2380 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 2381 // CHECK2-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 2382 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP42:![0-9]+]] 2383 // CHECK2: omp.inner.for.end: 2384 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2385 // CHECK2: omp.loop.exit: 2386 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 2387 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2388 // CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 2389 // CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2390 // CHECK2: .omp.final.then: 2391 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 2392 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2393 // CHECK2: .omp.final.done: 2394 // CHECK2-NEXT: ret void 2395 // 2396 // 2397 // CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 2398 // CHECK2-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 2399 // CHECK2-NEXT: entry: 2400 // CHECK2-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 2401 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2402 // CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 2403 // 
CHECK2-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 2404 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 2405 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 2406 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 2407 // CHECK2-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 2408 // CHECK2-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 2409 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 2410 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 2411 // CHECK2-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 2412 // CHECK2-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 2413 // CHECK2: omp_offload.failed: 2414 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59() #[[ATTR2]] 2415 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 2416 // CHECK2: omp_offload.cont: 2417 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 2418 // CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 2419 // CHECK2-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 2420 // CHECK2-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 2421 // CHECK2: omp_offload.failed2: 2422 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65() #[[ATTR2]] 2423 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT3]] 2424 // CHECK2: omp_offload.cont3: 2425 // CHECK2-NEXT: [[TMP4:%.*]] = load 
i32, i32* [[ARG_ADDR]], align 4 2426 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 2427 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 2428 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 2429 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 2430 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 2431 // CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 2432 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 2433 // CHECK2-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 2434 // CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 2435 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 2436 // CHECK2-NEXT: store i8* null, i8** [[TMP10]], align 8 2437 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 2438 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 2439 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 2440 // CHECK2-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 2441 // CHECK2-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 2442 // CHECK2-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 2443 // CHECK2: omp_offload.failed5: 2444 // CHECK2-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71(i64 [[TMP5]]) #[[ATTR2]] 2445 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT6]] 2446 // CHECK2: omp_offload.cont6: 2447 // CHECK2-NEXT: ret i32 0 2448 // 2449 // 2450 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59 2451 // CHECK2-SAME: () #[[ATTR1]] { 2452 // CHECK2-NEXT: entry: 2453 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 2454 // CHECK2-NEXT: ret void 2455 // 2456 // 2457 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10 2458 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 2459 // CHECK2-NEXT: entry: 2460 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2461 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2462 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2463 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2464 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 2465 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 2466 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2467 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2468 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2469 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2470 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2471 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 2472 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 2473 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2474 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2475 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2476 // 
CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 2477 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2478 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2479 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 2480 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2481 // CHECK2: cond.true: 2482 // CHECK2-NEXT: br label [[COND_END:%.*]] 2483 // CHECK2: cond.false: 2484 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2485 // CHECK2-NEXT: br label [[COND_END]] 2486 // CHECK2: cond.end: 2487 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 2488 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 2489 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 2490 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 2491 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2492 // CHECK2: omp.inner.for.cond: 2493 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44 2494 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44 2495 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 2496 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2497 // CHECK2: omp.inner.for.body: 2498 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !44 2499 // CHECK2-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 2500 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !44 2501 // CHECK2-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 2502 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !44 2503 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2504 // CHECK2: omp.inner.for.inc: 2505 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44 2506 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !44 2507 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 2508 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !44 2509 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP45:![0-9]+]] 2510 // CHECK2: omp.inner.for.end: 2511 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2512 // CHECK2: omp.loop.exit: 2513 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 2514 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2515 // CHECK2-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 2516 // CHECK2-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2517 // CHECK2: .omp.final.then: 2518 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 2519 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2520 // CHECK2: .omp.final.done: 2521 // CHECK2-NEXT: ret void 2522 // 2523 // 2524 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11 2525 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 2526 // CHECK2-NEXT: entry: 2527 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2528 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2529 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 2530 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 2531 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2532 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2533 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2534 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2535 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2536 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2537 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2538 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2539 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2540 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2541 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 2542 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2543 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 2544 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2545 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 2546 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 2547 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 2548 // CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 2549 // CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 2550 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2551 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2552 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2553 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 2554 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2555 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2556 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 2557 // 
CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2558 // CHECK2: cond.true: 2559 // CHECK2-NEXT: br label [[COND_END:%.*]] 2560 // CHECK2: cond.false: 2561 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2562 // CHECK2-NEXT: br label [[COND_END]] 2563 // CHECK2: cond.end: 2564 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 2565 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2566 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2567 // CHECK2-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 2568 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2569 // CHECK2: omp.inner.for.cond: 2570 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 2571 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47 2572 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 2573 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2574 // CHECK2: omp.inner.for.body: 2575 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 2576 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 2577 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2578 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !47 2579 // CHECK2-NEXT: call void @_Z3fn1v(), !llvm.access.group !47 2580 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2581 // CHECK2: omp.body.continue: 2582 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2583 // CHECK2: omp.inner.for.inc: 2584 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 2585 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 2586 // CHECK2-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 2587 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP48:![0-9]+]] 2588 // CHECK2: omp.inner.for.end: 2589 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2590 // CHECK2: omp.loop.exit: 2591 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 2592 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2593 // CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 2594 // CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2595 // CHECK2: .omp.final.then: 2596 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 2597 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2598 // CHECK2: .omp.final.done: 2599 // CHECK2-NEXT: ret void 2600 // 2601 // 2602 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65 2603 // CHECK2-SAME: () #[[ATTR1]] { 2604 // CHECK2-NEXT: entry: 2605 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..12 to void (i32*, i32*, ...)*)) 2606 // CHECK2-NEXT: ret void 2607 // 2608 // 2609 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..12 2610 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 2611 // CHECK2-NEXT: entry: 2612 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2613 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2614 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2615 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2616 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 2617 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 2618 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2619 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2620 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2621 // CHECK2-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, 
align 4 2622 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2623 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2624 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 2625 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 2626 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2627 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2628 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2629 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 2630 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2631 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2632 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 2633 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2634 // CHECK2: cond.true: 2635 // CHECK2-NEXT: br label [[COND_END:%.*]] 2636 // CHECK2: cond.false: 2637 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2638 // CHECK2-NEXT: br label [[COND_END]] 2639 // CHECK2: cond.end: 2640 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 2641 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 2642 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 2643 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 2644 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2645 // CHECK2: omp.inner.for.cond: 2646 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 2647 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50 2648 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 2649 // 
CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2650 // CHECK2: omp.inner.for.body: 2651 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !50 2652 // CHECK2-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 2653 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !50 2654 // CHECK2-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 2655 // CHECK2-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !50 2656 // CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !50 2657 // CHECK2-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !50 2658 // CHECK2-NEXT: call void @.omp_outlined..13(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !50 2659 // CHECK2-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !50 2660 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2661 // CHECK2: omp.inner.for.inc: 2662 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 2663 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !50 2664 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 2665 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 2666 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP51:![0-9]+]] 2667 // CHECK2: omp.inner.for.end: 2668 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2669 // CHECK2: omp.loop.exit: 2670 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 2671 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2672 // CHECK2-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 2673 // CHECK2-NEXT: br 
i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2674 // CHECK2: .omp.final.then: 2675 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 2676 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2677 // CHECK2: .omp.final.done: 2678 // CHECK2-NEXT: ret void 2679 // 2680 // 2681 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13 2682 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 2683 // CHECK2-NEXT: entry: 2684 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2685 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2686 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 2687 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 2688 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2689 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2690 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2691 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2692 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2693 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2694 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2695 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2696 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2697 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2698 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 2699 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2700 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 2701 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2702 // CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 2703 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* 
[[DOTPREVIOUS_UB__ADDR]], align 8 2704 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 2705 // CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 2706 // CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 2707 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2708 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2709 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2710 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 2711 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2712 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2713 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 2714 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2715 // CHECK2: cond.true: 2716 // CHECK2-NEXT: br label [[COND_END:%.*]] 2717 // CHECK2: cond.false: 2718 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2719 // CHECK2-NEXT: br label [[COND_END]] 2720 // CHECK2: cond.end: 2721 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 2722 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2723 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2724 // CHECK2-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 2725 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2726 // CHECK2: omp.inner.for.cond: 2727 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 2728 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !53 2729 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 2730 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2731 // CHECK2: 
omp.inner.for.body: 2732 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 2733 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 2734 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2735 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !53 2736 // CHECK2-NEXT: call void @_Z3fn2v(), !llvm.access.group !53 2737 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2738 // CHECK2: omp.body.continue: 2739 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2740 // CHECK2: omp.inner.for.inc: 2741 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 2742 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 2743 // CHECK2-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 2744 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]] 2745 // CHECK2: omp.inner.for.end: 2746 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2747 // CHECK2: omp.loop.exit: 2748 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 2749 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2750 // CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 2751 // CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2752 // CHECK2: .omp.final.then: 2753 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 2754 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2755 // CHECK2: .omp.final.done: 2756 // CHECK2-NEXT: ret void 2757 // 2758 // 2759 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71 2760 // CHECK2-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 2761 // CHECK2-NEXT: entry: 2762 // CHECK2-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 2763 // CHECK2-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 2764 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 2765 // CHECK2-NEXT: call void 
(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]]) 2766 // CHECK2-NEXT: ret void 2767 // 2768 // 2769 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..14 2770 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 2771 // CHECK2-NEXT: entry: 2772 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2773 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2774 // CHECK2-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 2775 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2776 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2777 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 2778 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 2779 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2780 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2781 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2782 // CHECK2-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 2783 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2784 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2785 // CHECK2-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 2786 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 2787 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 2788 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 2789 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2790 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2791 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2792 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 
2793 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2794 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2795 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 2796 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2797 // CHECK2: cond.true: 2798 // CHECK2-NEXT: br label [[COND_END:%.*]] 2799 // CHECK2: cond.false: 2800 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2801 // CHECK2-NEXT: br label [[COND_END]] 2802 // CHECK2: cond.end: 2803 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 2804 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 2805 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 2806 // CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 2807 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2808 // CHECK2: omp.inner.for.cond: 2809 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56 2810 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56 2811 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 2812 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2813 // CHECK2: omp.inner.for.body: 2814 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !56 2815 // CHECK2-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 2816 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !56 2817 // CHECK2-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 2818 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !56 2819 // CHECK2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 
[[TMP12]], 0 2820 // CHECK2-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 2821 // CHECK2: omp_if.then: 2822 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !56 2823 // CHECK2-NEXT: br label [[OMP_IF_END:%.*]] 2824 // CHECK2: omp_if.else: 2825 // CHECK2-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !56 2826 // CHECK2-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !56 2827 // CHECK2-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !56 2828 // CHECK2-NEXT: call void @.omp_outlined..15(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !56 2829 // CHECK2-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !56 2830 // CHECK2-NEXT: br label [[OMP_IF_END]] 2831 // CHECK2: omp_if.end: 2832 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2833 // CHECK2: omp.inner.for.inc: 2834 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56 2835 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !56 2836 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 2837 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !56 2838 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]] 2839 // CHECK2: omp.inner.for.end: 2840 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2841 // CHECK2: omp.loop.exit: 2842 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 2843 // CHECK2-NEXT: [[TMP16:%.*]] = 
load i32, i32* [[DOTOMP_IS_LAST]], align 4 2844 // CHECK2-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 2845 // CHECK2-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2846 // CHECK2: .omp.final.then: 2847 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 2848 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2849 // CHECK2: .omp.final.done: 2850 // CHECK2-NEXT: ret void 2851 // 2852 // 2853 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..15 2854 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 2855 // CHECK2-NEXT: entry: 2856 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2857 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2858 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 2859 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 2860 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2861 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 2862 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 2863 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 2864 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2865 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2866 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 2867 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2868 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2869 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2870 // CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 2871 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 2872 // CHECK2-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 2873 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 2874 // 
CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 2875 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 2876 // CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 2877 // CHECK2-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 2878 // CHECK2-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 2879 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2880 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2881 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2882 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 2883 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2884 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2885 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 2886 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2887 // CHECK2: cond.true: 2888 // CHECK2-NEXT: br label [[COND_END:%.*]] 2889 // CHECK2: cond.false: 2890 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 2891 // CHECK2-NEXT: br label [[COND_END]] 2892 // CHECK2: cond.end: 2893 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 2894 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 2895 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 2896 // CHECK2-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 2897 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 2898 // CHECK2: omp.inner.for.cond: 2899 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 2900 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !59 2901 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 2902 // CHECK2-NEXT: br i1 
[[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 2903 // CHECK2: omp.inner.for.body: 2904 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 2905 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 2906 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 2907 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !59 2908 // CHECK2-NEXT: call void @_Z3fn3v(), !llvm.access.group !59 2909 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 2910 // CHECK2: omp.body.continue: 2911 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 2912 // CHECK2: omp.inner.for.inc: 2913 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 2914 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 2915 // CHECK2-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 2916 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]] 2917 // CHECK2: omp.inner.for.end: 2918 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 2919 // CHECK2: omp.loop.exit: 2920 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 2921 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 2922 // CHECK2-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 2923 // CHECK2-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 2924 // CHECK2: .omp.final.then: 2925 // CHECK2-NEXT: store i32 100, i32* [[I]], align 4 2926 // CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]] 2927 // CHECK2: .omp.final.done: 2928 // CHECK2-NEXT: ret void 2929 // 2930 // 2931 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 2932 // CHECK2-SAME: () #[[ATTR5:[0-9]+]] { 2933 // CHECK2-NEXT: entry: 2934 // CHECK2-NEXT: call void @__tgt_register_requires(i64 1) 2935 // CHECK2-NEXT: ret void 2936 // 2937 // 2938 // CHECK3-LABEL: define {{[^@]+}}@_Z9gtid_testv 2939 // CHECK3-SAME: 
() #[[ATTR0:[0-9]+]] { 2940 // CHECK3-NEXT: entry: 2941 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 2942 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 2943 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 100) 2944 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 2945 // CHECK3-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 2946 // CHECK3-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 2947 // CHECK3: omp_offload.failed: 2948 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43() #[[ATTR2:[0-9]+]] 2949 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 2950 // CHECK3: omp_offload.cont: 2951 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 2952 // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 2953 // CHECK3-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 2954 // CHECK3-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 2955 // CHECK3: omp_offload.failed2: 2956 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48() #[[ATTR2]] 2957 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT3]] 2958 // CHECK3: omp_offload.cont3: 2959 // CHECK3-NEXT: ret void 2960 // 2961 // 2962 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43 2963 // CHECK3-SAME: () #[[ATTR1:[0-9]+]] { 2964 // CHECK3-NEXT: entry: 2965 // CHECK3-NEXT: call void (%struct.ident_t*, i32, 
void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 2966 // CHECK3-NEXT: ret void 2967 // 2968 // 2969 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined. 2970 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 2971 // CHECK3-NEXT: entry: 2972 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2973 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2974 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 2975 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 2976 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 2977 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 2978 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 2979 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 2980 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 2981 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2982 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2983 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 2984 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 2985 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 2986 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 2987 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2988 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 2989 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 2990 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2991 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 2992 // 
CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 2993 // CHECK3: cond.true: 2994 // CHECK3-NEXT: br label [[COND_END:%.*]] 2995 // CHECK3: cond.false: 2996 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 2997 // CHECK3-NEXT: br label [[COND_END]] 2998 // CHECK3: cond.end: 2999 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 3000 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 3001 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 3002 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 3003 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3004 // CHECK3: omp.inner.for.cond: 3005 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 3006 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11 3007 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 3008 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3009 // CHECK3: omp.inner.for.body: 3010 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !11 3011 // CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 3012 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11 3013 // CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 3014 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !11 3015 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3016 // CHECK3: omp.inner.for.inc: 3017 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 3018 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !11 3019 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 3020 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 3021 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] 3022 // CHECK3: omp.inner.for.end: 3023 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3024 // CHECK3: omp.loop.exit: 3025 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 3026 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3027 // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 3028 // CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3029 // CHECK3: .omp.final.then: 3030 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 3031 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3032 // CHECK3: .omp.final.done: 3033 // CHECK3-NEXT: ret void 3034 // 3035 // 3036 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1 3037 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 3038 // CHECK3-NEXT: entry: 3039 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3040 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3041 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 3042 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 3043 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3044 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3045 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3046 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3047 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3048 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3049 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3050 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3051 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3052 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3053 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3054 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3055 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 3056 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3057 // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 3058 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3059 // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 3060 // CHECK3-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 3061 // CHECK3-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 3062 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3063 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3064 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3065 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 3066 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3067 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3068 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 3069 // 
CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3070 // CHECK3: cond.true: 3071 // CHECK3-NEXT: br label [[COND_END:%.*]] 3072 // CHECK3: cond.false: 3073 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3074 // CHECK3-NEXT: br label [[COND_END]] 3075 // CHECK3: cond.end: 3076 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 3077 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3078 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3079 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 3080 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3081 // CHECK3: omp.inner.for.cond: 3082 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 3083 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15 3084 // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 3085 // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3086 // CHECK3: omp.inner.for.body: 3087 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 3088 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 3089 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3090 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !15 3091 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3092 // CHECK3: omp.body.continue: 3093 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3094 // CHECK3: omp.inner.for.inc: 3095 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 3096 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 3097 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 3098 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 3099 // CHECK3: omp.inner.for.end: 3100 // 
CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3101 // CHECK3: omp.loop.exit: 3102 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 3103 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3104 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 3105 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3106 // CHECK3: .omp.final.then: 3107 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 3108 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3109 // CHECK3: .omp.final.done: 3110 // CHECK3-NEXT: ret void 3111 // 3112 // 3113 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48 3114 // CHECK3-SAME: () #[[ATTR1]] { 3115 // CHECK3-NEXT: entry: 3116 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*)) 3117 // CHECK3-NEXT: ret void 3118 // 3119 // 3120 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..2 3121 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 3122 // CHECK3-NEXT: entry: 3123 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3124 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3125 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3126 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3127 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 3128 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 3129 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3130 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3131 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3132 // CHECK3-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 3133 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** 
[[DOTGLOBAL_TID__ADDR]], align 8 3134 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3135 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 3136 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 3137 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3138 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3139 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3140 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 3141 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3142 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3143 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 3144 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3145 // CHECK3: cond.true: 3146 // CHECK3-NEXT: br label [[COND_END:%.*]] 3147 // CHECK3: cond.false: 3148 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3149 // CHECK3-NEXT: br label [[COND_END]] 3150 // CHECK3: cond.end: 3151 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 3152 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 3153 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 3154 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 3155 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3156 // CHECK3: omp.inner.for.cond: 3157 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 3158 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20 3159 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 3160 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label 
[[OMP_INNER_FOR_END:%.*]] 3161 // CHECK3: omp.inner.for.body: 3162 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !20 3163 // CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 3164 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20 3165 // CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 3166 // CHECK3-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !20 3167 // CHECK3-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !20 3168 // CHECK3-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !20 3169 // CHECK3-NEXT: call void @.omp_outlined..3(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !20 3170 // CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !20 3171 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3172 // CHECK3: omp.inner.for.inc: 3173 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 3174 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !20 3175 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 3176 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 3177 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] 3178 // CHECK3: omp.inner.for.end: 3179 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3180 // CHECK3: omp.loop.exit: 3181 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 3182 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3183 // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 3184 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label 
[[DOTOMP_FINAL_DONE:%.*]] 3185 // CHECK3: .omp.final.then: 3186 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 3187 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3188 // CHECK3: .omp.final.done: 3189 // CHECK3-NEXT: ret void 3190 // 3191 // 3192 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..3 3193 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 3194 // CHECK3-NEXT: entry: 3195 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3196 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3197 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 3198 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 3199 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3200 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3201 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3202 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3203 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3204 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3205 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3206 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3207 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3208 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3209 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3210 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3211 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 3212 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3213 // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 3214 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3215 // CHECK3-NEXT: [[CONV1:%.*]] = trunc 
i64 [[TMP1]] to i32 3216 // CHECK3-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 3217 // CHECK3-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 3218 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3219 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3220 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3221 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 3222 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3223 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3224 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 3225 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3226 // CHECK3: cond.true: 3227 // CHECK3-NEXT: br label [[COND_END:%.*]] 3228 // CHECK3: cond.false: 3229 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3230 // CHECK3-NEXT: br label [[COND_END]] 3231 // CHECK3: cond.end: 3232 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 3233 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3234 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3235 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 3236 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3237 // CHECK3: omp.inner.for.cond: 3238 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 3239 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23 3240 // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 3241 // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3242 // CHECK3: omp.inner.for.body: 3243 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 
4, !llvm.access.group !23 3244 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 3245 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3246 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !23 3247 // CHECK3-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !23 3248 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3249 // CHECK3: omp.body.continue: 3250 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3251 // CHECK3: omp.inner.for.inc: 3252 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 3253 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 3254 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 3255 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] 3256 // CHECK3: omp.inner.for.end: 3257 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3258 // CHECK3: omp.loop.exit: 3259 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 3260 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3261 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 3262 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3263 // CHECK3: .omp.final.then: 3264 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 3265 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3266 // CHECK3: .omp.final.done: 3267 // CHECK3-NEXT: ret void 3268 // 3269 // 3270 // CHECK3-LABEL: define {{[^@]+}}@main 3271 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] { 3272 // CHECK3-NEXT: entry: 3273 // CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 3274 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3275 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 3276 // CHECK3-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 3277 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 3278 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 
3279 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 3280 // CHECK3-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 3281 // CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4 3282 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 3283 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 3284 // CHECK3-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 3285 // CHECK3-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 3286 // CHECK3: omp_offload.failed: 3287 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81() #[[ATTR2]] 3288 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 3289 // CHECK3: omp_offload.cont: 3290 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 3291 // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 3292 // CHECK3-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 3293 // CHECK3-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 3294 // CHECK3: omp_offload.failed2: 3295 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90() #[[ATTR2]] 3296 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT3]] 3297 // CHECK3: omp_offload.cont3: 3298 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* @Arg, align 4 3299 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 3300 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 3301 // CHECK3-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 3302 // 
CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3303 // CHECK3-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 3304 // CHECK3-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 3305 // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3306 // CHECK3-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 3307 // CHECK3-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 3308 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 3309 // CHECK3-NEXT: store i8* null, i8** [[TMP10]], align 8 3310 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 3311 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 3312 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 3313 // CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 3314 // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 3315 // CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 3316 // CHECK3: omp_offload.failed5: 3317 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99(i64 [[TMP5]]) #[[ATTR2]] 3318 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT6]] 3319 // CHECK3: omp_offload.cont6: 3320 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 3321 // CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 3322 
// CHECK3-NEXT: ret i32 [[CALL]] 3323 // 3324 // 3325 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 3326 // CHECK3-SAME: () #[[ATTR1]] { 3327 // CHECK3-NEXT: entry: 3328 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 3329 // CHECK3-NEXT: ret void 3330 // 3331 // 3332 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4 3333 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 3334 // CHECK3-NEXT: entry: 3335 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3336 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3337 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3338 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3339 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 3340 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 3341 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3342 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3343 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3344 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3345 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3346 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 3347 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 3348 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3349 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3350 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3351 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 3352 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* 
[[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3353 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3354 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 3355 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3356 // CHECK3: cond.true: 3357 // CHECK3-NEXT: br label [[COND_END:%.*]] 3358 // CHECK3: cond.false: 3359 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3360 // CHECK3-NEXT: br label [[COND_END]] 3361 // CHECK3: cond.end: 3362 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 3363 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 3364 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 3365 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 3366 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3367 // CHECK3: omp.inner.for.cond: 3368 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 3369 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26 3370 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 3371 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3372 // CHECK3: omp.inner.for.body: 3373 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26 3374 // CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 3375 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26 3376 // CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 3377 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !26 3378 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3379 // CHECK3: omp.inner.for.inc: 3380 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 3381 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26 3382 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 3383 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 3384 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] 3385 // CHECK3: omp.inner.for.end: 3386 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3387 // CHECK3: omp.loop.exit: 3388 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 3389 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3390 // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 3391 // CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3392 // CHECK3: .omp.final.then: 3393 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 3394 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3395 // CHECK3: .omp.final.done: 3396 // CHECK3-NEXT: ret void 3397 // 3398 // 3399 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..5 3400 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 3401 // CHECK3-NEXT: entry: 3402 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3403 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3404 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 3405 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 3406 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3407 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3408 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3409 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3410 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3411 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3412 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3413 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3414 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3415 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3416 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3417 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3418 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 3419 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3420 // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 3421 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3422 // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 3423 // CHECK3-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 3424 // CHECK3-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 3425 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3426 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3427 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3428 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 3429 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3430 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3431 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 3432 // 
CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3433 // CHECK3: cond.true: 3434 // CHECK3-NEXT: br label [[COND_END:%.*]] 3435 // CHECK3: cond.false: 3436 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3437 // CHECK3-NEXT: br label [[COND_END]] 3438 // CHECK3: cond.end: 3439 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 3440 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3441 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3442 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 3443 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3444 // CHECK3: omp.inner.for.cond: 3445 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 3446 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29 3447 // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 3448 // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3449 // CHECK3: omp.inner.for.body: 3450 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 3451 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 3452 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3453 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !29 3454 // CHECK3-NEXT: call void @_Z3fn4v(), !llvm.access.group !29 3455 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3456 // CHECK3: omp.body.continue: 3457 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3458 // CHECK3: omp.inner.for.inc: 3459 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 3460 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 3461 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 3462 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP30:![0-9]+]] 3463 // CHECK3: omp.inner.for.end: 3464 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3465 // CHECK3: omp.loop.exit: 3466 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 3467 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3468 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 3469 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3470 // CHECK3: .omp.final.then: 3471 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 3472 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3473 // CHECK3: .omp.final.done: 3474 // CHECK3-NEXT: ret void 3475 // 3476 // 3477 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90 3478 // CHECK3-SAME: () #[[ATTR1]] { 3479 // CHECK3-NEXT: entry: 3480 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..6 to void (i32*, i32*, ...)*)) 3481 // CHECK3-NEXT: ret void 3482 // 3483 // 3484 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..6 3485 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 3486 // CHECK3-NEXT: entry: 3487 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3488 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3489 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3490 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3491 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 3492 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 3493 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3494 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3495 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3496 // CHECK3-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 3497 // 
CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3498 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3499 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 3500 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 3501 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3502 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3503 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3504 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 3505 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3506 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3507 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 3508 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3509 // CHECK3: cond.true: 3510 // CHECK3-NEXT: br label [[COND_END:%.*]] 3511 // CHECK3: cond.false: 3512 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3513 // CHECK3-NEXT: br label [[COND_END]] 3514 // CHECK3: cond.end: 3515 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 3516 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 3517 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 3518 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 3519 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3520 // CHECK3: omp.inner.for.cond: 3521 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3522 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3523 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 3524 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], 
label [[OMP_INNER_FOR_END:%.*]] 3525 // CHECK3: omp.inner.for.body: 3526 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 3527 // CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 3528 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3529 // CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 3530 // CHECK3-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 3531 // CHECK3-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3532 // CHECK3-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 3533 // CHECK3-NEXT: call void @.omp_outlined..7(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]] 3534 // CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 3535 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3536 // CHECK3: omp.inner.for.inc: 3537 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3538 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 3539 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 3540 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 3541 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] 3542 // CHECK3: omp.inner.for.end: 3543 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3544 // CHECK3: omp.loop.exit: 3545 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 3546 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3547 // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 3548 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3549 // CHECK3: .omp.final.then: 3550 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 3551 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3552 // CHECK3: .omp.final.done: 3553 // CHECK3-NEXT: ret void 3554 // 3555 // 3556 
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7 3557 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 3558 // CHECK3-NEXT: entry: 3559 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3560 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3561 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 3562 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 3563 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3564 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3565 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3566 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3567 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3568 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3569 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3570 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3571 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3572 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3573 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3574 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3575 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 3576 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3577 // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 3578 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3579 // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 3580 // CHECK3-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 3581 // CHECK3-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 3582 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3583 // CHECK3-NEXT: store 
i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3584 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3585 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 3586 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3587 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3588 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 3589 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3590 // CHECK3: cond.true: 3591 // CHECK3-NEXT: br label [[COND_END:%.*]] 3592 // CHECK3: cond.false: 3593 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3594 // CHECK3-NEXT: br label [[COND_END]] 3595 // CHECK3: cond.end: 3596 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 3597 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3598 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3599 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 3600 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3601 // CHECK3: omp.inner.for.cond: 3602 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3603 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3604 // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 3605 // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3606 // CHECK3: omp.inner.for.body: 3607 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3608 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 3609 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3610 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4 3611 // CHECK3-NEXT: call void @_Z3fn5v() 3612 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3613 // CHECK3: 
omp.body.continue: 3614 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3615 // CHECK3: omp.inner.for.inc: 3616 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3617 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 3618 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4 3619 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] 3620 // CHECK3: omp.inner.for.end: 3621 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3622 // CHECK3: omp.loop.exit: 3623 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 3624 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3625 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 3626 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3627 // CHECK3: .omp.final.then: 3628 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 3629 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3630 // CHECK3: .omp.final.done: 3631 // CHECK3-NEXT: ret void 3632 // 3633 // 3634 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99 3635 // CHECK3-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 3636 // CHECK3-NEXT: entry: 3637 // CHECK3-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 3638 // CHECK3-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 3639 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 3640 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]]) 3641 // CHECK3-NEXT: ret void 3642 // 3643 // 3644 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..8 3645 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 3646 // CHECK3-NEXT: entry: 3647 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3648 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3649 // CHECK3-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 3650 // CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 3651 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3652 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3653 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 3654 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 3655 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3656 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3657 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3658 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 3659 // CHECK3-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 3660 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED12:%.*]] = alloca i64, align 8 3661 // CHECK3-NEXT: [[DOTBOUND_ZERO_ADDR18:%.*]] = alloca i32, align 4 3662 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3663 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3664 // CHECK3-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 3665 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 3666 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 3667 // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0 3668 // CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 
[[TOBOOL]] to i8 3669 // CHECK3-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 3670 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 3671 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 3672 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3673 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3674 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3675 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 3676 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3677 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3678 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 3679 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3680 // CHECK3: cond.true: 3681 // CHECK3-NEXT: br label [[COND_END:%.*]] 3682 // CHECK3: cond.false: 3683 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3684 // CHECK3-NEXT: br label [[COND_END]] 3685 // CHECK3: cond.end: 3686 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 3687 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 3688 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 3689 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 3690 // CHECK3-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 3691 // CHECK3-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP7]] to i1 3692 // CHECK3-NEXT: br i1 [[TOBOOL1]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE7:%.*]] 3693 // CHECK3: omp_if.then: 3694 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3695 // CHECK3: omp.inner.for.cond: 3696 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 3697 // 
CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35 3698 // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 3699 // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3700 // CHECK3: omp.inner.for.body: 3701 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35 3702 // CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 3703 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35 3704 // CHECK3-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 3705 // CHECK3-NEXT: [[TMP14:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !35 3706 // CHECK3-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP14]] to i1 3707 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8* 3708 // CHECK3-NEXT: [[FROMBOOL4:%.*]] = zext i1 [[TOBOOL3]] to i8 3709 // CHECK3-NEXT: store i8 [[FROMBOOL4]], i8* [[CONV]], align 1, !llvm.access.group !35 3710 // CHECK3-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !35 3711 // CHECK3-NEXT: [[TMP16:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !35 3712 // CHECK3-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP16]] to i1 3713 // CHECK3-NEXT: br i1 [[TOBOOL5]], label [[OMP_IF_THEN6:%.*]], label [[OMP_IF_ELSE:%.*]] 3714 // CHECK3: omp_if.then6: 3715 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP11]], i64 [[TMP13]], i64 [[TMP15]]), !llvm.access.group !35 3716 // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] 3717 // CHECK3: omp_if.else: 3718 // CHECK3-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]), !llvm.access.group !35 3719 // CHECK3-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !35 3720 // CHECK3-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !35 3721 // CHECK3-NEXT: call void @.omp_outlined..9(i32* [[TMP17]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP11]], i64 [[TMP13]], i64 [[TMP15]]) #[[ATTR2]], !llvm.access.group !35 3722 // CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]), !llvm.access.group !35 3723 // CHECK3-NEXT: br label [[OMP_IF_END]] 3724 // CHECK3: omp_if.end: 3725 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3726 // CHECK3: omp.inner.for.inc: 3727 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 3728 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !35 3729 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] 3730 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 3731 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] 3732 // CHECK3: omp.inner.for.end: 3733 // CHECK3-NEXT: br label [[OMP_IF_END23:%.*]] 3734 // CHECK3: omp_if.else7: 3735 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 3736 // CHECK3: omp.inner.for.cond8: 3737 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3738 // CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3739 // CHECK3-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] 
3740 // CHECK3-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END22:%.*]] 3741 // CHECK3: omp.inner.for.body10: 3742 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 3743 // CHECK3-NEXT: [[TMP23:%.*]] = zext i32 [[TMP22]] to i64 3744 // CHECK3-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 3745 // CHECK3-NEXT: [[TMP25:%.*]] = zext i32 [[TMP24]] to i64 3746 // CHECK3-NEXT: [[TMP26:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 3747 // CHECK3-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP26]] to i1 3748 // CHECK3-NEXT: [[CONV13:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED12]] to i8* 3749 // CHECK3-NEXT: [[FROMBOOL14:%.*]] = zext i1 [[TOBOOL11]] to i8 3750 // CHECK3-NEXT: store i8 [[FROMBOOL14]], i8* [[CONV13]], align 1 3751 // CHECK3-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED12]], align 8 3752 // CHECK3-NEXT: [[TMP28:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 3753 // CHECK3-NEXT: [[TOBOOL15:%.*]] = trunc i8 [[TMP28]] to i1 3754 // CHECK3-NEXT: br i1 [[TOBOOL15]], label [[OMP_IF_THEN16:%.*]], label [[OMP_IF_ELSE17:%.*]] 3755 // CHECK3: omp_if.then16: 3756 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP23]], i64 [[TMP25]], i64 [[TMP27]]) 3757 // CHECK3-NEXT: br label [[OMP_IF_END19:%.*]] 3758 // CHECK3: omp_if.else17: 3759 // CHECK3-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 3760 // CHECK3-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3761 // CHECK3-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR18]], align 4 3762 // CHECK3-NEXT: call void @.omp_outlined..10(i32* [[TMP29]], i32* [[DOTBOUND_ZERO_ADDR18]], i64 [[TMP23]], i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR2]] 3763 // CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 3764 // CHECK3-NEXT: br label [[OMP_IF_END19]] 3765 // CHECK3: omp_if.end19: 3766 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC20:%.*]] 3767 // CHECK3: omp.inner.for.inc20: 3768 // CHECK3-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3769 // CHECK3-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 3770 // CHECK3-NEXT: [[ADD21:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 3771 // CHECK3-NEXT: store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4 3772 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP38:![0-9]+]] 3773 // CHECK3: omp.inner.for.end22: 3774 // CHECK3-NEXT: br label [[OMP_IF_END23]] 3775 // CHECK3: omp_if.end23: 3776 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3777 // CHECK3: omp.loop.exit: 3778 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 3779 // CHECK3-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3780 // CHECK3-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 3781 // CHECK3-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3782 // CHECK3: .omp.final.then: 3783 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 
3784 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3785 // CHECK3: .omp.final.done: 3786 // CHECK3-NEXT: ret void 3787 // 3788 // 3789 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..9 3790 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 3791 // CHECK3-NEXT: entry: 3792 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3793 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3794 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 3795 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 3796 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 3797 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3798 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3799 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3800 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3801 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3802 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3803 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3804 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3805 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3806 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3807 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3808 // CHECK3-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 3809 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8* 3810 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3811 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 3812 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3813 // 
CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP0]] to i32 3814 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3815 // CHECK3-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32 3816 // CHECK3-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4 3817 // CHECK3-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4 3818 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3819 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3820 // CHECK3-NEXT: [[TMP2:%.*]] = load i8, i8* [[CONV]], align 1 3821 // CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 3822 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 3823 // CHECK3: omp_if.then: 3824 // CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3825 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 3826 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3827 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3828 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 3829 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3830 // CHECK3: cond.true: 3831 // CHECK3-NEXT: br label [[COND_END:%.*]] 3832 // CHECK3: cond.false: 3833 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3834 // CHECK3-NEXT: br label [[COND_END]] 3835 // CHECK3: cond.end: 3836 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] 3837 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3838 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3839 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4 3840 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3841 // CHECK3: omp.inner.for.cond: 3842 // CHECK3-NEXT: [[TMP8:%.*]] = load 
i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 3843 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39 3844 // CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 3845 // CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3846 // CHECK3: omp.inner.for.body: 3847 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 3848 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 3849 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3850 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39 3851 // CHECK3-NEXT: call void @_Z3fn6v(), !llvm.access.group !39 3852 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3853 // CHECK3: omp.body.continue: 3854 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3855 // CHECK3: omp.inner.for.inc: 3856 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 3857 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 3858 // CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 3859 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] 3860 // CHECK3: omp.inner.for.end: 3861 // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] 3862 // CHECK3: omp_if.else: 3863 // CHECK3-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3864 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4 3865 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP13]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3866 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3867 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP14]], 99 3868 // CHECK3-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]] 3869 // CHECK3: cond.true6: 3870 // 
CHECK3-NEXT: br label [[COND_END8:%.*]] 3871 // CHECK3: cond.false7: 3872 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3873 // CHECK3-NEXT: br label [[COND_END8]] 3874 // CHECK3: cond.end8: 3875 // CHECK3-NEXT: [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP15]], [[COND_FALSE7]] ] 3876 // CHECK3-NEXT: store i32 [[COND9]], i32* [[DOTOMP_UB]], align 4 3877 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3878 // CHECK3-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4 3879 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] 3880 // CHECK3: omp.inner.for.cond10: 3881 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3882 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3883 // CHECK3-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 3884 // CHECK3-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END18:%.*]] 3885 // CHECK3: omp.inner.for.body12: 3886 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3887 // CHECK3-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1 3888 // CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 3889 // CHECK3-NEXT: store i32 [[ADD14]], i32* [[I]], align 4 3890 // CHECK3-NEXT: call void @_Z3fn6v() 3891 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]] 3892 // CHECK3: omp.body.continue15: 3893 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] 3894 // CHECK3: omp.inner.for.inc16: 3895 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 3896 // CHECK3-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP20]], 1 3897 // CHECK3-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4 3898 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP42:![0-9]+]] 3899 // CHECK3: omp.inner.for.end18: 3900 // CHECK3-NEXT: br label [[OMP_IF_END]] 3901 // CHECK3: omp_if.end: 3902 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 3903 // CHECK3: omp.loop.exit: 3904 // CHECK3-NEXT: 
[[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3905 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4 3906 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]]) 3907 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 3908 // CHECK3-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 3909 // CHECK3-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 3910 // CHECK3: .omp.final.then: 3911 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 3912 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 3913 // CHECK3: .omp.final.done: 3914 // CHECK3-NEXT: ret void 3915 // 3916 // 3917 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10 3918 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 3919 // CHECK3-NEXT: entry: 3920 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 3921 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 3922 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 3923 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 3924 // CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 3925 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 3926 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 3927 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 3928 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 3929 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 3930 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 3931 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 3932 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 3933 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 3934 // 
CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3935 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3936 // CHECK3-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 3937 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8* 3938 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 3939 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 3940 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 3941 // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP0]] to i32 3942 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 3943 // CHECK3-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32 3944 // CHECK3-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4 3945 // CHECK3-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4 3946 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 3947 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 3948 // CHECK3-NEXT: [[TMP2:%.*]] = load i8, i8* [[CONV]], align 1 3949 // CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 3950 // CHECK3-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 3951 // CHECK3: omp_if.then: 3952 // CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 3953 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 3954 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3955 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3956 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 3957 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 3958 // CHECK3: cond.true: 3959 // CHECK3-NEXT: br label [[COND_END:%.*]] 3960 // CHECK3: cond.false: 3961 // 
CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3962 // CHECK3-NEXT: br label [[COND_END]] 3963 // CHECK3: cond.end: 3964 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] 3965 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 3966 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 3967 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4 3968 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 3969 // CHECK3: omp.inner.for.cond: 3970 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 3971 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !43 3972 // CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 3973 // CHECK3-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 3974 // CHECK3: omp.inner.for.body: 3975 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 3976 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 3977 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 3978 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !43 3979 // CHECK3-NEXT: call void @_Z3fn6v(), !llvm.access.group !43 3980 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 3981 // CHECK3: omp.body.continue: 3982 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 3983 // CHECK3: omp.inner.for.inc: 3984 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 3985 // CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 3986 // CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 3987 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] 3988 // CHECK3: omp.inner.for.end: 3989 // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] 3990 // CHECK3: omp_if.else: 3991 // CHECK3-NEXT: [[TMP12:%.*]] = load i32*, i32** 
[[DOTGLOBAL_TID__ADDR]], align 8 3992 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4 3993 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP13]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 3994 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 3995 // CHECK3-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP14]], 99 3996 // CHECK3-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]] 3997 // CHECK3: cond.true6: 3998 // CHECK3-NEXT: br label [[COND_END8:%.*]] 3999 // CHECK3: cond.false7: 4000 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4001 // CHECK3-NEXT: br label [[COND_END8]] 4002 // CHECK3: cond.end8: 4003 // CHECK3-NEXT: [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP15]], [[COND_FALSE7]] ] 4004 // CHECK3-NEXT: store i32 [[COND9]], i32* [[DOTOMP_UB]], align 4 4005 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4006 // CHECK3-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4 4007 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] 4008 // CHECK3: omp.inner.for.cond10: 4009 // CHECK3-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 4010 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4011 // CHECK3-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 4012 // CHECK3-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END18:%.*]] 4013 // CHECK3: omp.inner.for.body12: 4014 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 4015 // CHECK3-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1 4016 // CHECK3-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 4017 // CHECK3-NEXT: store i32 [[ADD14]], i32* [[I]], align 4 4018 // CHECK3-NEXT: call void @_Z3fn6v() 4019 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]] 4020 // CHECK3: omp.body.continue15: 4021 // CHECK3-NEXT: br 
label [[OMP_INNER_FOR_INC16:%.*]] 4022 // CHECK3: omp.inner.for.inc16: 4023 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 4024 // CHECK3-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP20]], 1 4025 // CHECK3-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4 4026 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP46:![0-9]+]] 4027 // CHECK3: omp.inner.for.end18: 4028 // CHECK3-NEXT: br label [[OMP_IF_END]] 4029 // CHECK3: omp_if.end: 4030 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4031 // CHECK3: omp.loop.exit: 4032 // CHECK3-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4033 // CHECK3-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4 4034 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]]) 4035 // CHECK3-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4036 // CHECK3-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 4037 // CHECK3-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4038 // CHECK3: .omp.final.then: 4039 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 4040 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 4041 // CHECK3: .omp.final.done: 4042 // CHECK3-NEXT: ret void 4043 // 4044 // 4045 // CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 4046 // CHECK3-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 4047 // CHECK3-NEXT: entry: 4048 // CHECK3-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 4049 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 4050 // CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 4051 // CHECK3-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 4052 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 4053 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 4054 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 4055 // CHECK3-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 4056 // CHECK3-NEXT: store i32 [[ARG]], i32* 
[[ARG_ADDR]], align 4 4057 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 4058 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 4059 // CHECK3-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 4060 // CHECK3-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4061 // CHECK3: omp_offload.failed: 4062 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59() #[[ATTR2]] 4063 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 4064 // CHECK3: omp_offload.cont: 4065 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 4066 // CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 4067 // CHECK3-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 4068 // CHECK3-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 4069 // CHECK3: omp_offload.failed2: 4070 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65() #[[ATTR2]] 4071 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT3]] 4072 // CHECK3: omp_offload.cont3: 4073 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 4074 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 4075 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 4076 // CHECK3-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 4077 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4078 // 
CHECK3-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 4079 // CHECK3-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 4080 // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4081 // CHECK3-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 4082 // CHECK3-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 4083 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 4084 // CHECK3-NEXT: store i8* null, i8** [[TMP10]], align 8 4085 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4086 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4087 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 4088 // CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 4089 // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 4090 // CHECK3-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 4091 // CHECK3: omp_offload.failed5: 4092 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71(i64 [[TMP5]]) #[[ATTR2]] 4093 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT6]] 4094 // CHECK3: omp_offload.cont6: 4095 // CHECK3-NEXT: ret i32 0 4096 // 4097 // 4098 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59 4099 // CHECK3-SAME: () #[[ATTR1]] { 4100 // CHECK3-NEXT: entry: 4101 // CHECK3-NEXT: call void 
(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*)) 4102 // CHECK3-NEXT: ret void 4103 // 4104 // 4105 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11 4106 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 4107 // CHECK3-NEXT: entry: 4108 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4109 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4110 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4111 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 4112 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 4113 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 4114 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4115 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4116 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 4117 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4118 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4119 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 4120 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 4121 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4122 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4123 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4124 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 4125 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4126 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4127 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 
99 4128 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4129 // CHECK3: cond.true: 4130 // CHECK3-NEXT: br label [[COND_END:%.*]] 4131 // CHECK3: cond.false: 4132 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4133 // CHECK3-NEXT: br label [[COND_END]] 4134 // CHECK3: cond.end: 4135 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 4136 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 4137 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 4138 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 4139 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4140 // CHECK3: omp.inner.for.cond: 4141 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 4142 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 4143 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 4144 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4145 // CHECK3: omp.inner.for.body: 4146 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47 4147 // CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 4148 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 4149 // CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 4150 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !47 4151 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4152 // CHECK3: omp.inner.for.inc: 4153 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 4154 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !47 4155 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 4156 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 4157 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]] 4158 // CHECK3: omp.inner.for.end: 4159 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4160 // CHECK3: omp.loop.exit: 4161 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 4162 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4163 // CHECK3-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 4164 // CHECK3-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4165 // CHECK3: .omp.final.then: 4166 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 4167 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 4168 // CHECK3: .omp.final.done: 4169 // CHECK3-NEXT: ret void 4170 // 4171 // 4172 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..12 4173 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 4174 // CHECK3-NEXT: entry: 4175 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4176 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4177 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 4178 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 4179 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4180 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 4181 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4182 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4183 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4184 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4185 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 4186 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4187 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4188 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 4189 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 4190 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4191 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 4192 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 4193 // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 4194 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 4195 // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 4196 // CHECK3-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 4197 // CHECK3-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 4198 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4199 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4200 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4201 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 4202 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4203 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4204 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 4205 // 
CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4206 // CHECK3: cond.true: 4207 // CHECK3-NEXT: br label [[COND_END:%.*]] 4208 // CHECK3: cond.false: 4209 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4210 // CHECK3-NEXT: br label [[COND_END]] 4211 // CHECK3: cond.end: 4212 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 4213 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4214 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4215 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 4216 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4217 // CHECK3: omp.inner.for.cond: 4218 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 4219 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !50 4220 // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 4221 // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4222 // CHECK3: omp.inner.for.body: 4223 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 4224 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 4225 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 4226 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !50 4227 // CHECK3-NEXT: call void @_Z3fn1v(), !llvm.access.group !50 4228 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4229 // CHECK3: omp.body.continue: 4230 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4231 // CHECK3: omp.inner.for.inc: 4232 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 4233 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 4234 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 4235 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP51:![0-9]+]] 4236 // CHECK3: omp.inner.for.end: 4237 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4238 // CHECK3: omp.loop.exit: 4239 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 4240 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4241 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 4242 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4243 // CHECK3: .omp.final.then: 4244 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 4245 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 4246 // CHECK3: .omp.final.done: 4247 // CHECK3-NEXT: ret void 4248 // 4249 // 4250 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65 4251 // CHECK3-SAME: () #[[ATTR1]] { 4252 // CHECK3-NEXT: entry: 4253 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..13 to void (i32*, i32*, ...)*)) 4254 // CHECK3-NEXT: ret void 4255 // 4256 // 4257 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13 4258 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 4259 // CHECK3-NEXT: entry: 4260 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4261 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4262 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4263 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 4264 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 4265 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 4266 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4267 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4268 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 4269 // CHECK3-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, 
align 4 4270 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4271 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4272 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 4273 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 4274 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4275 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4276 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4277 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 4278 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4279 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4280 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 4281 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4282 // CHECK3: cond.true: 4283 // CHECK3-NEXT: br label [[COND_END:%.*]] 4284 // CHECK3: cond.false: 4285 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4286 // CHECK3-NEXT: br label [[COND_END]] 4287 // CHECK3: cond.end: 4288 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 4289 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 4290 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 4291 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 4292 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4293 // CHECK3: omp.inner.for.cond: 4294 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 4295 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4296 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 4297 // CHECK3-NEXT: br i1 [[CMP1]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4298 // CHECK3: omp.inner.for.body: 4299 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 4300 // CHECK3-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 4301 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4302 // CHECK3-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 4303 // CHECK3-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 4304 // CHECK3-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4305 // CHECK3-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 4306 // CHECK3-NEXT: call void @.omp_outlined..14(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]] 4307 // CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 4308 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4309 // CHECK3: omp.inner.for.inc: 4310 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 4311 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 4312 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 4313 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 4314 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]] 4315 // CHECK3: omp.inner.for.end: 4316 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4317 // CHECK3: omp.loop.exit: 4318 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 4319 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4320 // CHECK3-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 4321 // CHECK3-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4322 // CHECK3: .omp.final.then: 4323 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 4324 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 4325 // CHECK3: .omp.final.done: 4326 // CHECK3-NEXT: 
ret void 4327 // 4328 // 4329 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14 4330 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 4331 // CHECK3-NEXT: entry: 4332 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4333 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4334 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 4335 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 4336 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4337 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 4338 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4339 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4340 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4341 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4342 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 4343 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4344 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4345 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 4346 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 4347 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4348 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 4349 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 4350 // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 4351 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 4352 // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 4353 // CHECK3-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 4354 // CHECK3-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 4355 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], 
align 4 4356 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4357 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4358 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 4359 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4360 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4361 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 4362 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4363 // CHECK3: cond.true: 4364 // CHECK3-NEXT: br label [[COND_END:%.*]] 4365 // CHECK3: cond.false: 4366 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4367 // CHECK3-NEXT: br label [[COND_END]] 4368 // CHECK3: cond.end: 4369 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 4370 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4371 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4372 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 4373 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4374 // CHECK3: omp.inner.for.cond: 4375 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 4376 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4377 // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 4378 // CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4379 // CHECK3: omp.inner.for.body: 4380 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 4381 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 4382 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 4383 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4 4384 // CHECK3-NEXT: call void @_Z3fn2v() 4385 // CHECK3-NEXT: br label 
[[OMP_BODY_CONTINUE:%.*]] 4386 // CHECK3: omp.body.continue: 4387 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4388 // CHECK3: omp.inner.for.inc: 4389 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 4390 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 4391 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4 4392 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]] 4393 // CHECK3: omp.inner.for.end: 4394 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4395 // CHECK3: omp.loop.exit: 4396 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 4397 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4398 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 4399 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4400 // CHECK3: .omp.final.then: 4401 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 4402 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 4403 // CHECK3: .omp.final.done: 4404 // CHECK3-NEXT: ret void 4405 // 4406 // 4407 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71 4408 // CHECK3-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 4409 // CHECK3-NEXT: entry: 4410 // CHECK3-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 4411 // CHECK3-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 4412 // CHECK3-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 4413 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32* [[CONV]]) 4414 // CHECK3-NEXT: ret void 4415 // 4416 // 4417 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..15 4418 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 4419 // CHECK3-NEXT: entry: 4420 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4421 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4422 // CHECK3-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 4423 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4424 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 4425 // CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 4426 // CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 4427 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4428 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4429 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 4430 // CHECK3-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 4431 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4432 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4433 // CHECK3-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 4434 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 4435 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 4436 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 4437 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4438 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4439 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4440 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 4441 // CHECK3-NEXT: call void 
@__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4442 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4443 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 4444 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4445 // CHECK3: cond.true: 4446 // CHECK3-NEXT: br label [[COND_END:%.*]] 4447 // CHECK3: cond.false: 4448 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4449 // CHECK3-NEXT: br label [[COND_END]] 4450 // CHECK3: cond.end: 4451 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 4452 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 4453 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 4454 // CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 4455 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4456 // CHECK3: omp.inner.for.cond: 4457 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55 4458 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !55 4459 // CHECK3-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 4460 // CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4461 // CHECK3: omp.inner.for.body: 4462 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !55 4463 // CHECK3-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 4464 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !55 4465 // CHECK3-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 4466 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !55 4467 // CHECK3-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 4468 // CHECK3-NEXT: br i1 
[[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 4469 // CHECK3: omp_if.then: 4470 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !55 4471 // CHECK3-NEXT: br label [[OMP_IF_END:%.*]] 4472 // CHECK3: omp_if.else: 4473 // CHECK3-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !55 4474 // CHECK3-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !55 4475 // CHECK3-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !55 4476 // CHECK3-NEXT: call void @.omp_outlined..16(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !55 4477 // CHECK3-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !55 4478 // CHECK3-NEXT: br label [[OMP_IF_END]] 4479 // CHECK3: omp_if.end: 4480 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4481 // CHECK3: omp.inner.for.inc: 4482 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55 4483 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !55 4484 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 4485 // CHECK3-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55 4486 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]] 4487 // CHECK3: omp.inner.for.end: 4488 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4489 // CHECK3: omp.loop.exit: 4490 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 4491 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 
4 4492 // CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 4493 // CHECK3-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4494 // CHECK3: .omp.final.then: 4495 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 4496 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 4497 // CHECK3: .omp.final.done: 4498 // CHECK3-NEXT: ret void 4499 // 4500 // 4501 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16 4502 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 4503 // CHECK3-NEXT: entry: 4504 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4505 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4506 // CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 4507 // CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 4508 // CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4509 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 4510 // CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4511 // CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4512 // CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4513 // CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4514 // CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4 4515 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4516 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4517 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 4518 // CHECK3-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 4519 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4520 // CHECK3-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 4521 // CHECK3-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 4522 // CHECK3-NEXT: [[CONV:%.*]] = trunc i64 
[[TMP0]] to i32 4523 // CHECK3-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 4524 // CHECK3-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 4525 // CHECK3-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 4526 // CHECK3-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 4527 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4528 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4529 // CHECK3-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4530 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 4531 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4532 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4533 // CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 4534 // CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4535 // CHECK3: cond.true: 4536 // CHECK3-NEXT: br label [[COND_END:%.*]] 4537 // CHECK3: cond.false: 4538 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4539 // CHECK3-NEXT: br label [[COND_END]] 4540 // CHECK3: cond.end: 4541 // CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 4542 // CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4543 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4544 // CHECK3-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 4545 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4546 // CHECK3: omp.inner.for.cond: 4547 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 4548 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !58 4549 // CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 4550 // CHECK3-NEXT: br i1 [[CMP2]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4551 // CHECK3: omp.inner.for.body: 4552 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 4553 // CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 4554 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 4555 // CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !58 4556 // CHECK3-NEXT: call void @_Z3fn3v(), !llvm.access.group !58 4557 // CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4558 // CHECK3: omp.body.continue: 4559 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4560 // CHECK3: omp.inner.for.inc: 4561 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 4562 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 4563 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 4564 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]] 4565 // CHECK3: omp.inner.for.end: 4566 // CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4567 // CHECK3: omp.loop.exit: 4568 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 4569 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4570 // CHECK3-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 4571 // CHECK3-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4572 // CHECK3: .omp.final.then: 4573 // CHECK3-NEXT: store i32 100, i32* [[I]], align 4 4574 // CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]] 4575 // CHECK3: .omp.final.done: 4576 // CHECK3-NEXT: ret void 4577 // 4578 // 4579 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 4580 // CHECK3-SAME: () #[[ATTR5:[0-9]+]] { 4581 // CHECK3-NEXT: entry: 4582 // CHECK3-NEXT: call void @__tgt_register_requires(i64 1) 4583 // CHECK3-NEXT: ret void 4584 // 4585 // 4586 // CHECK4-LABEL: define {{[^@]+}}@_Z9gtid_testv 4587 // CHECK4-SAME: () 
#[[ATTR0:[0-9]+]] { 4588 // CHECK4-NEXT: entry: 4589 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 4590 // CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 4591 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 100) 4592 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 4593 // CHECK4-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 4594 // CHECK4-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4595 // CHECK4: omp_offload.failed: 4596 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43() #[[ATTR2:[0-9]+]] 4597 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] 4598 // CHECK4: omp_offload.cont: 4599 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 4600 // CHECK4-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 4601 // CHECK4-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 4602 // CHECK4-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 4603 // CHECK4: omp_offload.failed2: 4604 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48() #[[ATTR2]] 4605 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT3]] 4606 // CHECK4: omp_offload.cont3: 4607 // CHECK4-NEXT: ret void 4608 // 4609 // 4610 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43 4611 // CHECK4-SAME: () #[[ATTR1:[0-9]+]] { 4612 // CHECK4-NEXT: entry: 4613 // CHECK4-NEXT: call void (%struct.ident_t*, i32, 
void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 4614 // CHECK4-NEXT: ret void 4615 // 4616 // 4617 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined. 4618 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 4619 // CHECK4-NEXT: entry: 4620 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4621 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4622 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4623 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 4624 // CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 4625 // CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 4626 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4627 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4628 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 4629 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4630 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4631 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 4632 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 4633 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4634 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4635 // CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4636 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 4637 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4638 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4639 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 4640 // 
CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4641 // CHECK4: cond.true: 4642 // CHECK4-NEXT: br label [[COND_END:%.*]] 4643 // CHECK4: cond.false: 4644 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4645 // CHECK4-NEXT: br label [[COND_END]] 4646 // CHECK4: cond.end: 4647 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 4648 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 4649 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 4650 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 4651 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4652 // CHECK4: omp.inner.for.cond: 4653 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 4654 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11 4655 // CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 4656 // CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4657 // CHECK4: omp.inner.for.body: 4658 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !11 4659 // CHECK4-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 4660 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !11 4661 // CHECK4-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 4662 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !11 4663 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4664 // CHECK4: omp.inner.for.inc: 4665 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 4666 // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !11 4667 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 4668 // CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11 4669 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] 4670 // CHECK4: omp.inner.for.end: 4671 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4672 // CHECK4: omp.loop.exit: 4673 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 4674 // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4675 // CHECK4-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 4676 // CHECK4-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4677 // CHECK4: .omp.final.then: 4678 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 4679 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 4680 // CHECK4: .omp.final.done: 4681 // CHECK4-NEXT: ret void 4682 // 4683 // 4684 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1 4685 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 4686 // CHECK4-NEXT: entry: 4687 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4688 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4689 // CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 4690 // CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 4691 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4692 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 4693 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4694 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4695 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4696 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4697 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 4698 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4699 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4700 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 4701 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 4702 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4703 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 4704 // CHECK4-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 4705 // CHECK4-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 4706 // CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 4707 // CHECK4-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 4708 // CHECK4-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 4709 // CHECK4-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 4710 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4711 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4712 // CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4713 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 4714 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4715 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4716 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 4717 // 
CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4718 // CHECK4: cond.true: 4719 // CHECK4-NEXT: br label [[COND_END:%.*]] 4720 // CHECK4: cond.false: 4721 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4722 // CHECK4-NEXT: br label [[COND_END]] 4723 // CHECK4: cond.end: 4724 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 4725 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4726 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4727 // CHECK4-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 4728 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4729 // CHECK4: omp.inner.for.cond: 4730 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 4731 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !15 4732 // CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 4733 // CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4734 // CHECK4: omp.inner.for.body: 4735 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 4736 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 4737 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 4738 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !15 4739 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4740 // CHECK4: omp.body.continue: 4741 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4742 // CHECK4: omp.inner.for.inc: 4743 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 4744 // CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 4745 // CHECK4-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 4746 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 4747 // CHECK4: omp.inner.for.end: 4748 // 
CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4749 // CHECK4: omp.loop.exit: 4750 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 4751 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4752 // CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 4753 // CHECK4-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4754 // CHECK4: .omp.final.then: 4755 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 4756 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 4757 // CHECK4: .omp.final.done: 4758 // CHECK4-NEXT: ret void 4759 // 4760 // 4761 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48 4762 // CHECK4-SAME: () #[[ATTR1]] { 4763 // CHECK4-NEXT: entry: 4764 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*)) 4765 // CHECK4-NEXT: ret void 4766 // 4767 // 4768 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..2 4769 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 4770 // CHECK4-NEXT: entry: 4771 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4772 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4773 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4774 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 4775 // CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 4776 // CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 4777 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4778 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4779 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 4780 // CHECK4-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 4781 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** 
[[DOTGLOBAL_TID__ADDR]], align 8 4782 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4783 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 4784 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 4785 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4786 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4787 // CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4788 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 4789 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4790 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4791 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 4792 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4793 // CHECK4: cond.true: 4794 // CHECK4-NEXT: br label [[COND_END:%.*]] 4795 // CHECK4: cond.false: 4796 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 4797 // CHECK4-NEXT: br label [[COND_END]] 4798 // CHECK4: cond.end: 4799 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 4800 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 4801 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 4802 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 4803 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4804 // CHECK4: omp.inner.for.cond: 4805 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 4806 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20 4807 // CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 4808 // CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label 
[[OMP_INNER_FOR_END:%.*]] 4809 // CHECK4: omp.inner.for.body: 4810 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !20 4811 // CHECK4-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 4812 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !20 4813 // CHECK4-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 4814 // CHECK4-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !20 4815 // CHECK4-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !20 4816 // CHECK4-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !20 4817 // CHECK4-NEXT: call void @.omp_outlined..3(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !20 4818 // CHECK4-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !20 4819 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4820 // CHECK4: omp.inner.for.inc: 4821 // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 4822 // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !20 4823 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 4824 // CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !20 4825 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]] 4826 // CHECK4: omp.inner.for.end: 4827 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4828 // CHECK4: omp.loop.exit: 4829 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 4830 // CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4831 // CHECK4-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 4832 // CHECK4-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label 
[[DOTOMP_FINAL_DONE:%.*]] 4833 // CHECK4: .omp.final.then: 4834 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 4835 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 4836 // CHECK4: .omp.final.done: 4837 // CHECK4-NEXT: ret void 4838 // 4839 // 4840 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..3 4841 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 4842 // CHECK4-NEXT: entry: 4843 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4844 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4845 // CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 4846 // CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 4847 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4848 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 4849 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 4850 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 4851 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4852 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4853 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 4854 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4855 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4856 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 4857 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 4858 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 4859 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 4860 // CHECK4-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 4861 // CHECK4-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 4862 // CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 4863 // CHECK4-NEXT: [[CONV1:%.*]] = trunc 
i64 [[TMP1]] to i32 4864 // CHECK4-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 4865 // CHECK4-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 4866 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4867 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4868 // CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4869 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 4870 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 4871 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4872 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 4873 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 4874 // CHECK4: cond.true: 4875 // CHECK4-NEXT: br label [[COND_END:%.*]] 4876 // CHECK4: cond.false: 4877 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 4878 // CHECK4-NEXT: br label [[COND_END]] 4879 // CHECK4: cond.end: 4880 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 4881 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 4882 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 4883 // CHECK4-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 4884 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 4885 // CHECK4: omp.inner.for.cond: 4886 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 4887 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !23 4888 // CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 4889 // CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 4890 // CHECK4: omp.inner.for.body: 4891 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 
4, !llvm.access.group !23 4892 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 4893 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 4894 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !23 4895 // CHECK4-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !23 4896 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 4897 // CHECK4: omp.body.continue: 4898 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 4899 // CHECK4: omp.inner.for.inc: 4900 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 4901 // CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 4902 // CHECK4-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !23 4903 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]] 4904 // CHECK4: omp.inner.for.end: 4905 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 4906 // CHECK4: omp.loop.exit: 4907 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 4908 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 4909 // CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 4910 // CHECK4-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 4911 // CHECK4: .omp.final.then: 4912 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 4913 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 4914 // CHECK4: .omp.final.done: 4915 // CHECK4-NEXT: ret void 4916 // 4917 // 4918 // CHECK4-LABEL: define {{[^@]+}}@main 4919 // CHECK4-SAME: () #[[ATTR3:[0-9]+]] { 4920 // CHECK4-NEXT: entry: 4921 // CHECK4-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 4922 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 4923 // CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 4924 // CHECK4-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 4925 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 4926 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 
4927 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 4928 // CHECK4-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 4929 // CHECK4-NEXT: store i32 0, i32* [[RETVAL]], align 4 4930 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 4931 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 4932 // CHECK4-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 4933 // CHECK4-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 4934 // CHECK4: omp_offload.failed: 4935 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81() #[[ATTR2]] 4936 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] 4937 // CHECK4: omp_offload.cont: 4938 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 4939 // CHECK4-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 4940 // CHECK4-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 4941 // CHECK4-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 4942 // CHECK4: omp_offload.failed2: 4943 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90() #[[ATTR2]] 4944 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT3]] 4945 // CHECK4: omp_offload.cont3: 4946 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* @Arg, align 4 4947 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 4948 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 4949 // CHECK4-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 4950 // 
CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4951 // CHECK4-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 4952 // CHECK4-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 4953 // CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4954 // CHECK4-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 4955 // CHECK4-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 4956 // CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 4957 // CHECK4-NEXT: store i8* null, i8** [[TMP10]], align 8 4958 // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 4959 // CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 4960 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 4961 // CHECK4-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 4962 // CHECK4-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 4963 // CHECK4-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 4964 // CHECK4: omp_offload.failed5: 4965 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99(i64 [[TMP5]]) #[[ATTR2]] 4966 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT6]] 4967 // CHECK4: omp_offload.cont6: 4968 // CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 4969 // CHECK4-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 4970 
// CHECK4-NEXT: ret i32 [[CALL]] 4971 // 4972 // 4973 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 4974 // CHECK4-SAME: () #[[ATTR1]] { 4975 // CHECK4-NEXT: entry: 4976 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 4977 // CHECK4-NEXT: ret void 4978 // 4979 // 4980 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..4 4981 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 4982 // CHECK4-NEXT: entry: 4983 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 4984 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 4985 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 4986 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 4987 // CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 4988 // CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 4989 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 4990 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 4991 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 4992 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 4993 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 4994 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 4995 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 4996 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 4997 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 4998 // CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 4999 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 5000 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* 
[[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5001 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5002 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 5003 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5004 // CHECK4: cond.true: 5005 // CHECK4-NEXT: br label [[COND_END:%.*]] 5006 // CHECK4: cond.false: 5007 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5008 // CHECK4-NEXT: br label [[COND_END]] 5009 // CHECK4: cond.end: 5010 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 5011 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 5012 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 5013 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 5014 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5015 // CHECK4: omp.inner.for.cond: 5016 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 5017 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26 5018 // CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 5019 // CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5020 // CHECK4: omp.inner.for.body: 5021 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !26 5022 // CHECK4-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 5023 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !26 5024 // CHECK4-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 5025 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !26 5026 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5027 // CHECK4: omp.inner.for.inc: 5028 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 5029 // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !26 5030 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 5031 // CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !26 5032 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]] 5033 // CHECK4: omp.inner.for.end: 5034 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5035 // CHECK4: omp.loop.exit: 5036 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 5037 // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5038 // CHECK4-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 5039 // CHECK4-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5040 // CHECK4: .omp.final.then: 5041 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 5042 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 5043 // CHECK4: .omp.final.done: 5044 // CHECK4-NEXT: ret void 5045 // 5046 // 5047 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..5 5048 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 5049 // CHECK4-NEXT: entry: 5050 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5051 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5052 // CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 5053 // CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 5054 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5055 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 5056 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5057 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5058 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5059 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5060 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 5061 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5062 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5063 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5064 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 5065 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5066 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 5067 // CHECK4-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5068 // CHECK4-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 5069 // CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 5070 // CHECK4-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 5071 // CHECK4-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 5072 // CHECK4-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 5073 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5074 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5075 // CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5076 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 5077 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5078 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5079 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 5080 // 
CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5081 // CHECK4: cond.true: 5082 // CHECK4-NEXT: br label [[COND_END:%.*]] 5083 // CHECK4: cond.false: 5084 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5085 // CHECK4-NEXT: br label [[COND_END]] 5086 // CHECK4: cond.end: 5087 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 5088 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 5089 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5090 // CHECK4-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 5091 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5092 // CHECK4: omp.inner.for.cond: 5093 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 5094 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !29 5095 // CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 5096 // CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5097 // CHECK4: omp.inner.for.body: 5098 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 5099 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 5100 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 5101 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !29 5102 // CHECK4-NEXT: call void @_Z3fn4v(), !llvm.access.group !29 5103 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 5104 // CHECK4: omp.body.continue: 5105 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5106 // CHECK4: omp.inner.for.inc: 5107 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 5108 // CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 5109 // CHECK4-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 5110 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP30:![0-9]+]] 5111 // CHECK4: omp.inner.for.end: 5112 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5113 // CHECK4: omp.loop.exit: 5114 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 5115 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5116 // CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 5117 // CHECK4-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5118 // CHECK4: .omp.final.then: 5119 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 5120 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 5121 // CHECK4: .omp.final.done: 5122 // CHECK4-NEXT: ret void 5123 // 5124 // 5125 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90 5126 // CHECK4-SAME: () #[[ATTR1]] { 5127 // CHECK4-NEXT: entry: 5128 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..6 to void (i32*, i32*, ...)*)) 5129 // CHECK4-NEXT: ret void 5130 // 5131 // 5132 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..6 5133 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 5134 // CHECK4-NEXT: entry: 5135 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5136 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5137 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5138 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 5139 // CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 5140 // CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 5141 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5142 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5143 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 5144 // CHECK4-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 5145 // 
CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5146 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5147 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 5148 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 5149 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5150 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5151 // CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5152 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 5153 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5154 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5155 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 5156 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5157 // CHECK4: cond.true: 5158 // CHECK4-NEXT: br label [[COND_END:%.*]] 5159 // CHECK4: cond.false: 5160 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5161 // CHECK4-NEXT: br label [[COND_END]] 5162 // CHECK4: cond.end: 5163 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 5164 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 5165 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 5166 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 5167 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5168 // CHECK4: omp.inner.for.cond: 5169 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5170 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5171 // CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 5172 // CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], 
label [[OMP_INNER_FOR_END:%.*]] 5173 // CHECK4: omp.inner.for.body: 5174 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 5175 // CHECK4-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 5176 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5177 // CHECK4-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 5178 // CHECK4-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 5179 // CHECK4-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5180 // CHECK4-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 5181 // CHECK4-NEXT: call void @.omp_outlined..7(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]] 5182 // CHECK4-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 5183 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5184 // CHECK4: omp.inner.for.inc: 5185 // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5186 // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 5187 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 5188 // CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 5189 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP32:![0-9]+]] 5190 // CHECK4: omp.inner.for.end: 5191 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5192 // CHECK4: omp.loop.exit: 5193 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 5194 // CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5195 // CHECK4-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 5196 // CHECK4-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5197 // CHECK4: .omp.final.then: 5198 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 5199 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 5200 // CHECK4: .omp.final.done: 5201 // CHECK4-NEXT: ret void 5202 // 5203 // 5204 
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..7 5205 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 5206 // CHECK4-NEXT: entry: 5207 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5208 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5209 // CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 5210 // CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 5211 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5212 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 5213 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5214 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5215 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5216 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5217 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 5218 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5219 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5220 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5221 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 5222 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5223 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 5224 // CHECK4-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5225 // CHECK4-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 5226 // CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 5227 // CHECK4-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 5228 // CHECK4-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 5229 // CHECK4-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 5230 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5231 // CHECK4-NEXT: store 
i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5232 // CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5233 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 5234 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5235 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5236 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 5237 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5238 // CHECK4: cond.true: 5239 // CHECK4-NEXT: br label [[COND_END:%.*]] 5240 // CHECK4: cond.false: 5241 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5242 // CHECK4-NEXT: br label [[COND_END]] 5243 // CHECK4: cond.end: 5244 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 5245 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 5246 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5247 // CHECK4-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 5248 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5249 // CHECK4: omp.inner.for.cond: 5250 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5251 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5252 // CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 5253 // CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5254 // CHECK4: omp.inner.for.body: 5255 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5256 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 5257 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 5258 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4 5259 // CHECK4-NEXT: call void @_Z3fn5v() 5260 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 5261 // CHECK4: 
omp.body.continue: 5262 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5263 // CHECK4: omp.inner.for.inc: 5264 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5265 // CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 5266 // CHECK4-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4 5267 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] 5268 // CHECK4: omp.inner.for.end: 5269 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5270 // CHECK4: omp.loop.exit: 5271 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 5272 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5273 // CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 5274 // CHECK4-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5275 // CHECK4: .omp.final.then: 5276 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 5277 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 5278 // CHECK4: .omp.final.done: 5279 // CHECK4-NEXT: ret void 5280 // 5281 // 5282 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99 5283 // CHECK4-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 5284 // CHECK4-NEXT: entry: 5285 // CHECK4-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 5286 // CHECK4-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 5287 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 5288 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]]) 5289 // CHECK4-NEXT: ret void 5290 // 5291 // 5292 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..8 5293 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 5294 // CHECK4-NEXT: entry: 5295 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5296 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5297 // CHECK4-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 5298 // CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 5299 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5300 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 5301 // CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 5302 // CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 5303 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5304 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5305 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 5306 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 5307 // CHECK4-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 5308 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED12:%.*]] = alloca i64, align 8 5309 // CHECK4-NEXT: [[DOTBOUND_ZERO_ADDR18:%.*]] = alloca i32, align 4 5310 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5311 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5312 // CHECK4-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 5313 // CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 5314 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 5315 // CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0 5316 // CHECK4-NEXT: [[FROMBOOL:%.*]] = zext i1 
[[TOBOOL]] to i8 5317 // CHECK4-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 5318 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 5319 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 5320 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5321 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5322 // CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5323 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 5324 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5325 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5326 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 5327 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5328 // CHECK4: cond.true: 5329 // CHECK4-NEXT: br label [[COND_END:%.*]] 5330 // CHECK4: cond.false: 5331 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5332 // CHECK4-NEXT: br label [[COND_END]] 5333 // CHECK4: cond.end: 5334 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 5335 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 5336 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 5337 // CHECK4-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 5338 // CHECK4-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 5339 // CHECK4-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP7]] to i1 5340 // CHECK4-NEXT: br i1 [[TOBOOL1]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE7:%.*]] 5341 // CHECK4: omp_if.then: 5342 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5343 // CHECK4: omp.inner.for.cond: 5344 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 5345 // 
CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35 5346 // CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 5347 // CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5348 // CHECK4: omp.inner.for.body: 5349 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35 5350 // CHECK4-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 5351 // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35 5352 // CHECK4-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 5353 // CHECK4-NEXT: [[TMP14:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !35 5354 // CHECK4-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP14]] to i1 5355 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8* 5356 // CHECK4-NEXT: [[FROMBOOL4:%.*]] = zext i1 [[TOBOOL3]] to i8 5357 // CHECK4-NEXT: store i8 [[FROMBOOL4]], i8* [[CONV]], align 1, !llvm.access.group !35 5358 // CHECK4-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !35 5359 // CHECK4-NEXT: [[TMP16:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !35 5360 // CHECK4-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP16]] to i1 5361 // CHECK4-NEXT: br i1 [[TOBOOL5]], label [[OMP_IF_THEN6:%.*]], label [[OMP_IF_ELSE:%.*]] 5362 // CHECK4: omp_if.then6: 5363 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP11]], i64 [[TMP13]], i64 [[TMP15]]), !llvm.access.group !35 5364 // CHECK4-NEXT: br label [[OMP_IF_END:%.*]] 5365 // CHECK4: omp_if.else: 5366 // CHECK4-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]), !llvm.access.group !35 5367 // CHECK4-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !35 5368 // CHECK4-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !35 5369 // CHECK4-NEXT: call void @.omp_outlined..9(i32* [[TMP17]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP11]], i64 [[TMP13]], i64 [[TMP15]]) #[[ATTR2]], !llvm.access.group !35 5370 // CHECK4-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]), !llvm.access.group !35 5371 // CHECK4-NEXT: br label [[OMP_IF_END]] 5372 // CHECK4: omp_if.end: 5373 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5374 // CHECK4: omp.inner.for.inc: 5375 // CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 5376 // CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !35 5377 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] 5378 // CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 5379 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] 5380 // CHECK4: omp.inner.for.end: 5381 // CHECK4-NEXT: br label [[OMP_IF_END23:%.*]] 5382 // CHECK4: omp_if.else7: 5383 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 5384 // CHECK4: omp.inner.for.cond8: 5385 // CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5386 // CHECK4-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5387 // CHECK4-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] 
5388 // CHECK4-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END22:%.*]] 5389 // CHECK4: omp.inner.for.body10: 5390 // CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 5391 // CHECK4-NEXT: [[TMP23:%.*]] = zext i32 [[TMP22]] to i64 5392 // CHECK4-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5393 // CHECK4-NEXT: [[TMP25:%.*]] = zext i32 [[TMP24]] to i64 5394 // CHECK4-NEXT: [[TMP26:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 5395 // CHECK4-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP26]] to i1 5396 // CHECK4-NEXT: [[CONV13:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED12]] to i8* 5397 // CHECK4-NEXT: [[FROMBOOL14:%.*]] = zext i1 [[TOBOOL11]] to i8 5398 // CHECK4-NEXT: store i8 [[FROMBOOL14]], i8* [[CONV13]], align 1 5399 // CHECK4-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED12]], align 8 5400 // CHECK4-NEXT: [[TMP28:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 5401 // CHECK4-NEXT: [[TOBOOL15:%.*]] = trunc i8 [[TMP28]] to i1 5402 // CHECK4-NEXT: br i1 [[TOBOOL15]], label [[OMP_IF_THEN16:%.*]], label [[OMP_IF_ELSE17:%.*]] 5403 // CHECK4: omp_if.then16: 5404 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP23]], i64 [[TMP25]], i64 [[TMP27]]) 5405 // CHECK4-NEXT: br label [[OMP_IF_END19:%.*]] 5406 // CHECK4: omp_if.else17: 5407 // CHECK4-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 5408 // CHECK4-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5409 // CHECK4-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR18]], align 4 5410 // CHECK4-NEXT: call void @.omp_outlined..10(i32* [[TMP29]], i32* [[DOTBOUND_ZERO_ADDR18]], i64 [[TMP23]], i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR2]] 5411 // CHECK4-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 5412 // CHECK4-NEXT: br label [[OMP_IF_END19]] 5413 // CHECK4: omp_if.end19: 5414 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC20:%.*]] 5415 // CHECK4: omp.inner.for.inc20: 5416 // CHECK4-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5417 // CHECK4-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 5418 // CHECK4-NEXT: [[ADD21:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 5419 // CHECK4-NEXT: store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4 5420 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP38:![0-9]+]] 5421 // CHECK4: omp.inner.for.end22: 5422 // CHECK4-NEXT: br label [[OMP_IF_END23]] 5423 // CHECK4: omp_if.end23: 5424 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5425 // CHECK4: omp.loop.exit: 5426 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 5427 // CHECK4-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5428 // CHECK4-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 5429 // CHECK4-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5430 // CHECK4: .omp.final.then: 5431 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 
5432 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 5433 // CHECK4: .omp.final.done: 5434 // CHECK4-NEXT: ret void 5435 // 5436 // 5437 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..9 5438 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 5439 // CHECK4-NEXT: entry: 5440 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5441 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5442 // CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 5443 // CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 5444 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 5445 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5446 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 5447 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5448 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5449 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5450 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5451 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 5452 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5453 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5454 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5455 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 5456 // CHECK4-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 5457 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8* 5458 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5459 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 5460 // CHECK4-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5461 // 
CHECK4-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP0]] to i32 5462 // CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 5463 // CHECK4-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32 5464 // CHECK4-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4 5465 // CHECK4-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4 5466 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5467 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5468 // CHECK4-NEXT: [[TMP2:%.*]] = load i8, i8* [[CONV]], align 1 5469 // CHECK4-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 5470 // CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 5471 // CHECK4: omp_if.then: 5472 // CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5473 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 5474 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5475 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5476 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 5477 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5478 // CHECK4: cond.true: 5479 // CHECK4-NEXT: br label [[COND_END:%.*]] 5480 // CHECK4: cond.false: 5481 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5482 // CHECK4-NEXT: br label [[COND_END]] 5483 // CHECK4: cond.end: 5484 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] 5485 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 5486 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5487 // CHECK4-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4 5488 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5489 // CHECK4: omp.inner.for.cond: 5490 // CHECK4-NEXT: [[TMP8:%.*]] = load 
i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 5491 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39 5492 // CHECK4-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 5493 // CHECK4-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5494 // CHECK4: omp.inner.for.body: 5495 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 5496 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 5497 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 5498 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39 5499 // CHECK4-NEXT: call void @_Z3fn6v(), !llvm.access.group !39 5500 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 5501 // CHECK4: omp.body.continue: 5502 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5503 // CHECK4: omp.inner.for.inc: 5504 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 5505 // CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 5506 // CHECK4-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 5507 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] 5508 // CHECK4: omp.inner.for.end: 5509 // CHECK4-NEXT: br label [[OMP_IF_END:%.*]] 5510 // CHECK4: omp_if.else: 5511 // CHECK4-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5512 // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4 5513 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP13]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5514 // CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5515 // CHECK4-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP14]], 99 5516 // CHECK4-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]] 5517 // CHECK4: cond.true6: 5518 // 
CHECK4-NEXT: br label [[COND_END8:%.*]] 5519 // CHECK4: cond.false7: 5520 // CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5521 // CHECK4-NEXT: br label [[COND_END8]] 5522 // CHECK4: cond.end8: 5523 // CHECK4-NEXT: [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP15]], [[COND_FALSE7]] ] 5524 // CHECK4-NEXT: store i32 [[COND9]], i32* [[DOTOMP_UB]], align 4 5525 // CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5526 // CHECK4-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4 5527 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] 5528 // CHECK4: omp.inner.for.cond10: 5529 // CHECK4-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5530 // CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5531 // CHECK4-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 5532 // CHECK4-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END18:%.*]] 5533 // CHECK4: omp.inner.for.body12: 5534 // CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5535 // CHECK4-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1 5536 // CHECK4-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 5537 // CHECK4-NEXT: store i32 [[ADD14]], i32* [[I]], align 4 5538 // CHECK4-NEXT: call void @_Z3fn6v() 5539 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]] 5540 // CHECK4: omp.body.continue15: 5541 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] 5542 // CHECK4: omp.inner.for.inc16: 5543 // CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5544 // CHECK4-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP20]], 1 5545 // CHECK4-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4 5546 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP42:![0-9]+]] 5547 // CHECK4: omp.inner.for.end18: 5548 // CHECK4-NEXT: br label [[OMP_IF_END]] 5549 // CHECK4: omp_if.end: 5550 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5551 // CHECK4: omp.loop.exit: 5552 // CHECK4-NEXT: 
[[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5553 // CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4 5554 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]]) 5555 // CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5556 // CHECK4-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 5557 // CHECK4-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5558 // CHECK4: .omp.final.then: 5559 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 5560 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 5561 // CHECK4: .omp.final.done: 5562 // CHECK4-NEXT: ret void 5563 // 5564 // 5565 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..10 5566 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 5567 // CHECK4-NEXT: entry: 5568 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5569 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5570 // CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 5571 // CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 5572 // CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 5573 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5574 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 5575 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5576 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5577 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5578 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5579 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 5580 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5581 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5582 // 
CHECK4-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5583 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 5584 // CHECK4-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 5585 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8* 5586 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5587 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 5588 // CHECK4-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5589 // CHECK4-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP0]] to i32 5590 // CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 5591 // CHECK4-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32 5592 // CHECK4-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4 5593 // CHECK4-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4 5594 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5595 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5596 // CHECK4-NEXT: [[TMP2:%.*]] = load i8, i8* [[CONV]], align 1 5597 // CHECK4-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 5598 // CHECK4-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 5599 // CHECK4: omp_if.then: 5600 // CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5601 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 5602 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5603 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5604 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 5605 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5606 // CHECK4: cond.true: 5607 // CHECK4-NEXT: br label [[COND_END:%.*]] 5608 // CHECK4: cond.false: 5609 // 
CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5610 // CHECK4-NEXT: br label [[COND_END]] 5611 // CHECK4: cond.end: 5612 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] 5613 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 5614 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5615 // CHECK4-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4 5616 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5617 // CHECK4: omp.inner.for.cond: 5618 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 5619 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !43 5620 // CHECK4-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 5621 // CHECK4-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5622 // CHECK4: omp.inner.for.body: 5623 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 5624 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 5625 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 5626 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !43 5627 // CHECK4-NEXT: call void @_Z3fn6v(), !llvm.access.group !43 5628 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 5629 // CHECK4: omp.body.continue: 5630 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5631 // CHECK4: omp.inner.for.inc: 5632 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 5633 // CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 5634 // CHECK4-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 5635 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] 5636 // CHECK4: omp.inner.for.end: 5637 // CHECK4-NEXT: br label [[OMP_IF_END:%.*]] 5638 // CHECK4: omp_if.else: 5639 // CHECK4-NEXT: [[TMP12:%.*]] = load i32*, i32** 
[[DOTGLOBAL_TID__ADDR]], align 8 5640 // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4 5641 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP13]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5642 // CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5643 // CHECK4-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP14]], 99 5644 // CHECK4-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]] 5645 // CHECK4: cond.true6: 5646 // CHECK4-NEXT: br label [[COND_END8:%.*]] 5647 // CHECK4: cond.false7: 5648 // CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5649 // CHECK4-NEXT: br label [[COND_END8]] 5650 // CHECK4: cond.end8: 5651 // CHECK4-NEXT: [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP15]], [[COND_FALSE7]] ] 5652 // CHECK4-NEXT: store i32 [[COND9]], i32* [[DOTOMP_UB]], align 4 5653 // CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5654 // CHECK4-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4 5655 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] 5656 // CHECK4: omp.inner.for.cond10: 5657 // CHECK4-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5658 // CHECK4-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5659 // CHECK4-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 5660 // CHECK4-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END18:%.*]] 5661 // CHECK4: omp.inner.for.body12: 5662 // CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5663 // CHECK4-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1 5664 // CHECK4-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 5665 // CHECK4-NEXT: store i32 [[ADD14]], i32* [[I]], align 4 5666 // CHECK4-NEXT: call void @_Z3fn6v() 5667 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]] 5668 // CHECK4: omp.body.continue15: 5669 // CHECK4-NEXT: br 
label [[OMP_INNER_FOR_INC16:%.*]] 5670 // CHECK4: omp.inner.for.inc16: 5671 // CHECK4-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5672 // CHECK4-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP20]], 1 5673 // CHECK4-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4 5674 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP46:![0-9]+]] 5675 // CHECK4: omp.inner.for.end18: 5676 // CHECK4-NEXT: br label [[OMP_IF_END]] 5677 // CHECK4: omp_if.end: 5678 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5679 // CHECK4: omp.loop.exit: 5680 // CHECK4-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5681 // CHECK4-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4 5682 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]]) 5683 // CHECK4-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5684 // CHECK4-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 5685 // CHECK4-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5686 // CHECK4: .omp.final.then: 5687 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 5688 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 5689 // CHECK4: .omp.final.done: 5690 // CHECK4-NEXT: ret void 5691 // 5692 // 5693 // CHECK4-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 5694 // CHECK4-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 5695 // CHECK4-NEXT: entry: 5696 // CHECK4-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 5697 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 5698 // CHECK4-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 5699 // CHECK4-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 5700 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 5701 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 5702 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 5703 // CHECK4-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 5704 // CHECK4-NEXT: store i32 [[ARG]], i32* 
[[ARG_ADDR]], align 4 5705 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 5706 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 5707 // CHECK4-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 5708 // CHECK4-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 5709 // CHECK4: omp_offload.failed: 5710 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59() #[[ATTR2]] 5711 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] 5712 // CHECK4: omp_offload.cont: 5713 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 5714 // CHECK4-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 5715 // CHECK4-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 5716 // CHECK4-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 5717 // CHECK4: omp_offload.failed2: 5718 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65() #[[ATTR2]] 5719 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT3]] 5720 // CHECK4: omp_offload.cont3: 5721 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 5722 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 5723 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 5724 // CHECK4-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 5725 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5726 // 
CHECK4-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 5727 // CHECK4-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 5728 // CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5729 // CHECK4-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 5730 // CHECK4-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 5731 // CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 5732 // CHECK4-NEXT: store i8* null, i8** [[TMP10]], align 8 5733 // CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 5734 // CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 5735 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 5736 // CHECK4-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 5737 // CHECK4-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 5738 // CHECK4-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 5739 // CHECK4: omp_offload.failed5: 5740 // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71(i64 [[TMP5]]) #[[ATTR2]] 5741 // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT6]] 5742 // CHECK4: omp_offload.cont6: 5743 // CHECK4-NEXT: ret i32 0 5744 // 5745 // 5746 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59 5747 // CHECK4-SAME: () #[[ATTR1]] { 5748 // CHECK4-NEXT: entry: 5749 // CHECK4-NEXT: call void 
(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*)) 5750 // CHECK4-NEXT: ret void 5751 // 5752 // 5753 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11 5754 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 5755 // CHECK4-NEXT: entry: 5756 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5757 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5758 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5759 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 5760 // CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 5761 // CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 5762 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5763 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5764 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 5765 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5766 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5767 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 5768 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 5769 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5770 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5771 // CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5772 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 5773 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5774 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5775 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 
99 5776 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5777 // CHECK4: cond.true: 5778 // CHECK4-NEXT: br label [[COND_END:%.*]] 5779 // CHECK4: cond.false: 5780 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5781 // CHECK4-NEXT: br label [[COND_END]] 5782 // CHECK4: cond.end: 5783 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 5784 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 5785 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 5786 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 5787 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5788 // CHECK4: omp.inner.for.cond: 5789 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 5790 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 5791 // CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 5792 // CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5793 // CHECK4: omp.inner.for.body: 5794 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47 5795 // CHECK4-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 5796 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 5797 // CHECK4-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 5798 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !47 5799 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5800 // CHECK4: omp.inner.for.inc: 5801 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 5802 // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !47 5803 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 5804 // CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 5805 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]] 5806 // CHECK4: omp.inner.for.end: 5807 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5808 // CHECK4: omp.loop.exit: 5809 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 5810 // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5811 // CHECK4-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 5812 // CHECK4-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5813 // CHECK4: .omp.final.then: 5814 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 5815 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 5816 // CHECK4: .omp.final.done: 5817 // CHECK4-NEXT: ret void 5818 // 5819 // 5820 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..12 5821 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 5822 // CHECK4-NEXT: entry: 5823 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5824 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5825 // CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 5826 // CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 5827 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5828 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 5829 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5830 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5831 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5832 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5833 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 5834 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5835 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5836 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5837 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 5838 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5839 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 5840 // CHECK4-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5841 // CHECK4-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 5842 // CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 5843 // CHECK4-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 5844 // CHECK4-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 5845 // CHECK4-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 5846 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5847 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5848 // CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5849 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 5850 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5851 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5852 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 5853 // 
CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5854 // CHECK4: cond.true: 5855 // CHECK4-NEXT: br label [[COND_END:%.*]] 5856 // CHECK4: cond.false: 5857 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 5858 // CHECK4-NEXT: br label [[COND_END]] 5859 // CHECK4: cond.end: 5860 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 5861 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 5862 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 5863 // CHECK4-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 5864 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5865 // CHECK4: omp.inner.for.cond: 5866 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 5867 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !50 5868 // CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 5869 // CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5870 // CHECK4: omp.inner.for.body: 5871 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 5872 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 5873 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 5874 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !50 5875 // CHECK4-NEXT: call void @_Z3fn1v(), !llvm.access.group !50 5876 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 5877 // CHECK4: omp.body.continue: 5878 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5879 // CHECK4: omp.inner.for.inc: 5880 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 5881 // CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 5882 // CHECK4-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !50 5883 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP51:![0-9]+]] 5884 // CHECK4: omp.inner.for.end: 5885 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5886 // CHECK4: omp.loop.exit: 5887 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 5888 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5889 // CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 5890 // CHECK4-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5891 // CHECK4: .omp.final.then: 5892 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 5893 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 5894 // CHECK4: .omp.final.done: 5895 // CHECK4-NEXT: ret void 5896 // 5897 // 5898 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65 5899 // CHECK4-SAME: () #[[ATTR1]] { 5900 // CHECK4-NEXT: entry: 5901 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..13 to void (i32*, i32*, ...)*)) 5902 // CHECK4-NEXT: ret void 5903 // 5904 // 5905 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..13 5906 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 5907 // CHECK4-NEXT: entry: 5908 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5909 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5910 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5911 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 5912 // CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 5913 // CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 5914 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5915 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5916 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 5917 // CHECK4-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, 
align 4 5918 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5919 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5920 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 5921 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 5922 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 5923 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 5924 // CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5925 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 5926 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 5927 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5928 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 5929 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 5930 // CHECK4: cond.true: 5931 // CHECK4-NEXT: br label [[COND_END:%.*]] 5932 // CHECK4: cond.false: 5933 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5934 // CHECK4-NEXT: br label [[COND_END]] 5935 // CHECK4: cond.end: 5936 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 5937 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 5938 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 5939 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 5940 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 5941 // CHECK4: omp.inner.for.cond: 5942 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5943 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5944 // CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 5945 // CHECK4-NEXT: br i1 [[CMP1]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 5946 // CHECK4: omp.inner.for.body: 5947 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 5948 // CHECK4-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 5949 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 5950 // CHECK4-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 5951 // CHECK4-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 5952 // CHECK4-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 5953 // CHECK4-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 5954 // CHECK4-NEXT: call void @.omp_outlined..14(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]] 5955 // CHECK4-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 5956 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 5957 // CHECK4: omp.inner.for.inc: 5958 // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 5959 // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 5960 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 5961 // CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 5962 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP53:![0-9]+]] 5963 // CHECK4: omp.inner.for.end: 5964 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 5965 // CHECK4: omp.loop.exit: 5966 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 5967 // CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 5968 // CHECK4-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 5969 // CHECK4-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 5970 // CHECK4: .omp.final.then: 5971 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 5972 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 5973 // CHECK4: .omp.final.done: 5974 // CHECK4-NEXT: 
ret void 5975 // 5976 // 5977 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..14 5978 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 5979 // CHECK4-NEXT: entry: 5980 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 5981 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 5982 // CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 5983 // CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 5984 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 5985 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 5986 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 5987 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 5988 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 5989 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 5990 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 5991 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 5992 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 5993 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5994 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 5995 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 5996 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 5997 // CHECK4-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 5998 // CHECK4-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 5999 // CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 6000 // CHECK4-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 6001 // CHECK4-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 6002 // CHECK4-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 6003 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], 
align 4 6004 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 6005 // CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 6006 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 6007 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 6008 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 6009 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 6010 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 6011 // CHECK4: cond.true: 6012 // CHECK4-NEXT: br label [[COND_END:%.*]] 6013 // CHECK4: cond.false: 6014 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 6015 // CHECK4-NEXT: br label [[COND_END]] 6016 // CHECK4: cond.end: 6017 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 6018 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 6019 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6020 // CHECK4-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 6021 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6022 // CHECK4: omp.inner.for.cond: 6023 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 6024 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 6025 // CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 6026 // CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6027 // CHECK4: omp.inner.for.body: 6028 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 6029 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 6030 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6031 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4 6032 // CHECK4-NEXT: call void @_Z3fn2v() 6033 // CHECK4-NEXT: br label 
[[OMP_BODY_CONTINUE:%.*]] 6034 // CHECK4: omp.body.continue: 6035 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6036 // CHECK4: omp.inner.for.inc: 6037 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 6038 // CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 6039 // CHECK4-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4 6040 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP54:![0-9]+]] 6041 // CHECK4: omp.inner.for.end: 6042 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 6043 // CHECK4: omp.loop.exit: 6044 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 6045 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 6046 // CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 6047 // CHECK4-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 6048 // CHECK4: .omp.final.then: 6049 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 6050 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 6051 // CHECK4: .omp.final.done: 6052 // CHECK4-NEXT: ret void 6053 // 6054 // 6055 // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71 6056 // CHECK4-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 6057 // CHECK4-NEXT: entry: 6058 // CHECK4-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 6059 // CHECK4-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 6060 // CHECK4-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 6061 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32* [[CONV]]) 6062 // CHECK4-NEXT: ret void 6063 // 6064 // 6065 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..15 6066 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 6067 // CHECK4-NEXT: entry: 6068 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6069 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6070 // CHECK4-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 6071 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6072 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 6073 // CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 6074 // CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 6075 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 6076 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 6077 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 6078 // CHECK4-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 6079 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6080 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6081 // CHECK4-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 6082 // CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 6083 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 6084 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 6085 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 6086 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 6087 // CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 6088 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 6089 // CHECK4-NEXT: call void 
@__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 6090 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 6091 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 6092 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 6093 // CHECK4: cond.true: 6094 // CHECK4-NEXT: br label [[COND_END:%.*]] 6095 // CHECK4: cond.false: 6096 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 6097 // CHECK4-NEXT: br label [[COND_END]] 6098 // CHECK4: cond.end: 6099 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 6100 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 6101 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 6102 // CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 6103 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6104 // CHECK4: omp.inner.for.cond: 6105 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55 6106 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !55 6107 // CHECK4-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 6108 // CHECK4-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6109 // CHECK4: omp.inner.for.body: 6110 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !55 6111 // CHECK4-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 6112 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !55 6113 // CHECK4-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 6114 // CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !55 6115 // CHECK4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 6116 // CHECK4-NEXT: br i1 
[[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 6117 // CHECK4: omp_if.then: 6118 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !55 6119 // CHECK4-NEXT: br label [[OMP_IF_END:%.*]] 6120 // CHECK4: omp_if.else: 6121 // CHECK4-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !55 6122 // CHECK4-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !55 6123 // CHECK4-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !55 6124 // CHECK4-NEXT: call void @.omp_outlined..16(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !55 6125 // CHECK4-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !55 6126 // CHECK4-NEXT: br label [[OMP_IF_END]] 6127 // CHECK4: omp_if.end: 6128 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6129 // CHECK4: omp.inner.for.inc: 6130 // CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55 6131 // CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !55 6132 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 6133 // CHECK4-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !55 6134 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP56:![0-9]+]] 6135 // CHECK4: omp.inner.for.end: 6136 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 6137 // CHECK4: omp.loop.exit: 6138 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 6139 // CHECK4-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 
4 6140 // CHECK4-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 6141 // CHECK4-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 6142 // CHECK4: .omp.final.then: 6143 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 6144 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 6145 // CHECK4: .omp.final.done: 6146 // CHECK4-NEXT: ret void 6147 // 6148 // 6149 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..16 6150 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 6151 // CHECK4-NEXT: entry: 6152 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 6153 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 6154 // CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 6155 // CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 6156 // CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6157 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 6158 // CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6159 // CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6160 // CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 6161 // CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 6162 // CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4 6163 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 6164 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 6165 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 6166 // CHECK4-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 6167 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6168 // CHECK4-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 6169 // CHECK4-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 6170 // CHECK4-NEXT: [[CONV:%.*]] = trunc i64 
[[TMP0]] to i32 6171 // CHECK4-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 6172 // CHECK4-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 6173 // CHECK4-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 6174 // CHECK4-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 6175 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 6176 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 6177 // CHECK4-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 6178 // CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 6179 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 6180 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 6181 // CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 6182 // CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 6183 // CHECK4: cond.true: 6184 // CHECK4-NEXT: br label [[COND_END:%.*]] 6185 // CHECK4: cond.false: 6186 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 6187 // CHECK4-NEXT: br label [[COND_END]] 6188 // CHECK4: cond.end: 6189 // CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 6190 // CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 6191 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6192 // CHECK4-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 6193 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6194 // CHECK4: omp.inner.for.cond: 6195 // CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 6196 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !58 6197 // CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 6198 // CHECK4-NEXT: br i1 [[CMP2]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6199 // CHECK4: omp.inner.for.body: 6200 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 6201 // CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 6202 // CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6203 // CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !58 6204 // CHECK4-NEXT: call void @_Z3fn3v(), !llvm.access.group !58 6205 // CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6206 // CHECK4: omp.body.continue: 6207 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6208 // CHECK4: omp.inner.for.inc: 6209 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 6210 // CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 6211 // CHECK4-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 6212 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP59:![0-9]+]] 6213 // CHECK4: omp.inner.for.end: 6214 // CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 6215 // CHECK4: omp.loop.exit: 6216 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 6217 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 6218 // CHECK4-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 6219 // CHECK4-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 6220 // CHECK4: .omp.final.then: 6221 // CHECK4-NEXT: store i32 100, i32* [[I]], align 4 6222 // CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]] 6223 // CHECK4: .omp.final.done: 6224 // CHECK4-NEXT: ret void 6225 // 6226 // 6227 // CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 6228 // CHECK4-SAME: () #[[ATTR5:[0-9]+]] { 6229 // CHECK4-NEXT: entry: 6230 // CHECK4-NEXT: call void @__tgt_register_requires(i64 1) 6231 // CHECK4-NEXT: ret void 6232 // 6233 // 6234 // CHECK5-LABEL: define {{[^@]+}}@_Z9gtid_testv 6235 // CHECK5-SAME: () 
#[[ATTR0:[0-9]+]] { 6236 // CHECK5-NEXT: entry: 6237 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 6238 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6239 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6240 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6241 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 6242 // CHECK5-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6243 // CHECK5-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6244 // CHECK5-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6245 // CHECK5-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6246 // CHECK5-NEXT: [[I6:%.*]] = alloca i32, align 4 6247 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6248 // CHECK5-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 6249 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6250 // CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 6251 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6252 // CHECK5: omp.inner.for.cond: 6253 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6254 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 6255 // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 6256 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6257 // CHECK5: omp.inner.for.body: 6258 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6259 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 6260 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6261 // CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2 6262 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6263 // CHECK5: omp.body.continue: 6264 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6265 // CHECK5: omp.inner.for.inc: 6266 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6267 // CHECK5-NEXT: 
[[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 6268 // CHECK5-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6269 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 6270 // CHECK5: omp.inner.for.end: 6271 // CHECK5-NEXT: store i32 100, i32* [[I]], align 4 6272 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 6273 // CHECK5-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 6274 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 6275 // CHECK5-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 6276 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 6277 // CHECK5: omp.inner.for.cond7: 6278 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 6279 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !6 6280 // CHECK5-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 6281 // CHECK5-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 6282 // CHECK5: omp.inner.for.body9: 6283 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 6284 // CHECK5-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 6285 // CHECK5-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 6286 // CHECK5-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !6 6287 // CHECK5-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !6 6288 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 6289 // CHECK5: omp.body.continue12: 6290 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 6291 // CHECK5: omp.inner.for.inc13: 6292 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 6293 // CHECK5-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 6294 // CHECK5-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 6295 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP7:![0-9]+]] 6296 // CHECK5: 
omp.inner.for.end15: 6297 // CHECK5-NEXT: store i32 100, i32* [[I6]], align 4 6298 // CHECK5-NEXT: ret void 6299 // 6300 // 6301 // CHECK5-LABEL: define {{[^@]+}}@main 6302 // CHECK5-SAME: () #[[ATTR1:[0-9]+]] { 6303 // CHECK5-NEXT: entry: 6304 // CHECK5-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 6305 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 6306 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6307 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6308 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6309 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 6310 // CHECK5-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6311 // CHECK5-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6312 // CHECK5-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6313 // CHECK5-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6314 // CHECK5-NEXT: [[I6:%.*]] = alloca i32, align 4 6315 // CHECK5-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 6316 // CHECK5-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 6317 // CHECK5-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 6318 // CHECK5-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 6319 // CHECK5-NEXT: [[I20:%.*]] = alloca i32, align 4 6320 // CHECK5-NEXT: store i32 0, i32* [[RETVAL]], align 4 6321 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6322 // CHECK5-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 6323 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6324 // CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 6325 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6326 // CHECK5: omp.inner.for.cond: 6327 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6328 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !9 6329 // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 6330 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6331 // CHECK5: 
omp.inner.for.body: 6332 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6333 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 6334 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6335 // CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !9 6336 // CHECK5-NEXT: call void @_Z3fn4v(), !llvm.access.group !9 6337 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6338 // CHECK5: omp.body.continue: 6339 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6340 // CHECK5: omp.inner.for.inc: 6341 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6342 // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 6343 // CHECK5-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6344 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] 6345 // CHECK5: omp.inner.for.end: 6346 // CHECK5-NEXT: store i32 100, i32* [[I]], align 4 6347 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 6348 // CHECK5-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 6349 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 6350 // CHECK5-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 6351 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 6352 // CHECK5: omp.inner.for.cond7: 6353 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !12 6354 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !12 6355 // CHECK5-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 6356 // CHECK5-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 6357 // CHECK5: omp.inner.for.body9: 6358 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !12 6359 // CHECK5-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 6360 // CHECK5-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 6361 // 
CHECK5-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !12 6362 // CHECK5-NEXT: call void @_Z3fn5v(), !llvm.access.group !12 6363 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 6364 // CHECK5: omp.body.continue12: 6365 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 6366 // CHECK5: omp.inner.for.inc13: 6367 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !12 6368 // CHECK5-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 6369 // CHECK5-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !12 6370 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]] 6371 // CHECK5: omp.inner.for.end15: 6372 // CHECK5-NEXT: store i32 100, i32* [[I6]], align 4 6373 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 6374 // CHECK5-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 6375 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 6376 // CHECK5-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 6377 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 6378 // CHECK5: omp.inner.for.cond21: 6379 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !15 6380 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !15 6381 // CHECK5-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 6382 // CHECK5-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 6383 // CHECK5: omp.inner.for.body23: 6384 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !15 6385 // CHECK5-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 6386 // CHECK5-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 6387 // CHECK5-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !15 6388 // CHECK5-NEXT: call void @_Z3fn6v(), !llvm.access.group !15 6389 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 
6390 // CHECK5: omp.body.continue26: 6391 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 6392 // CHECK5: omp.inner.for.inc27: 6393 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !15 6394 // CHECK5-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 6395 // CHECK5-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !15 6396 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP16:![0-9]+]] 6397 // CHECK5: omp.inner.for.end29: 6398 // CHECK5-NEXT: store i32 100, i32* [[I20]], align 4 6399 // CHECK5-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 6400 // CHECK5-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 6401 // CHECK5-NEXT: ret i32 [[CALL]] 6402 // 6403 // 6404 // CHECK5-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 6405 // CHECK5-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 6406 // CHECK5-NEXT: entry: 6407 // CHECK5-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 6408 // CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4 6409 // CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6410 // CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6411 // CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6412 // CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4 6413 // CHECK5-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6414 // CHECK5-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6415 // CHECK5-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6416 // CHECK5-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6417 // CHECK5-NEXT: [[I6:%.*]] = alloca i32, align 4 6418 // CHECK5-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 6419 // CHECK5-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 6420 // CHECK5-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 6421 // CHECK5-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 6422 // CHECK5-NEXT: [[I20:%.*]] = alloca i32, align 4 6423 // CHECK5-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 6424 // CHECK5-NEXT: store i32 0, i32* 
[[DOTOMP_LB]], align 4 6425 // CHECK5-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 6426 // CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6427 // CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 6428 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6429 // CHECK5: omp.inner.for.cond: 6430 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 6431 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18 6432 // CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 6433 // CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6434 // CHECK5: omp.inner.for.body: 6435 // CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 6436 // CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 6437 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6438 // CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !18 6439 // CHECK5-NEXT: call void @_Z3fn1v(), !llvm.access.group !18 6440 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6441 // CHECK5: omp.body.continue: 6442 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6443 // CHECK5: omp.inner.for.inc: 6444 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 6445 // CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 6446 // CHECK5-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 6447 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] 6448 // CHECK5: omp.inner.for.end: 6449 // CHECK5-NEXT: store i32 100, i32* [[I]], align 4 6450 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 6451 // CHECK5-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 6452 // CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 6453 // CHECK5-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 6454 // CHECK5-NEXT: br label 
[[OMP_INNER_FOR_COND7:%.*]] 6455 // CHECK5: omp.inner.for.cond7: 6456 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21 6457 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !21 6458 // CHECK5-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 6459 // CHECK5-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 6460 // CHECK5: omp.inner.for.body9: 6461 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21 6462 // CHECK5-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 6463 // CHECK5-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 6464 // CHECK5-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !21 6465 // CHECK5-NEXT: call void @_Z3fn2v(), !llvm.access.group !21 6466 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 6467 // CHECK5: omp.body.continue12: 6468 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 6469 // CHECK5: omp.inner.for.inc13: 6470 // CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21 6471 // CHECK5-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 6472 // CHECK5-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21 6473 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP22:![0-9]+]] 6474 // CHECK5: omp.inner.for.end15: 6475 // CHECK5-NEXT: store i32 100, i32* [[I6]], align 4 6476 // CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 6477 // CHECK5-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 6478 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 6479 // CHECK5-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 6480 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 6481 // CHECK5: omp.inner.for.cond21: 6482 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !24 6483 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, 
i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !24 6484 // CHECK5-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 6485 // CHECK5-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 6486 // CHECK5: omp.inner.for.body23: 6487 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !24 6488 // CHECK5-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 6489 // CHECK5-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 6490 // CHECK5-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !24 6491 // CHECK5-NEXT: call void @_Z3fn3v(), !llvm.access.group !24 6492 // CHECK5-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 6493 // CHECK5: omp.body.continue26: 6494 // CHECK5-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 6495 // CHECK5: omp.inner.for.inc27: 6496 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !24 6497 // CHECK5-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 6498 // CHECK5-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !24 6499 // CHECK5-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP25:![0-9]+]] 6500 // CHECK5: omp.inner.for.end29: 6501 // CHECK5-NEXT: store i32 100, i32* [[I20]], align 4 6502 // CHECK5-NEXT: ret i32 0 6503 // 6504 // 6505 // CHECK6-LABEL: define {{[^@]+}}@_Z9gtid_testv 6506 // CHECK6-SAME: () #[[ATTR0:[0-9]+]] { 6507 // CHECK6-NEXT: entry: 6508 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4 6509 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6510 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6511 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6512 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4 6513 // CHECK6-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6514 // CHECK6-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6515 // CHECK6-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6516 // CHECK6-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6517 // 
CHECK6-NEXT: [[I6:%.*]] = alloca i32, align 4 6518 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6519 // CHECK6-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 6520 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6521 // CHECK6-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 6522 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6523 // CHECK6: omp.inner.for.cond: 6524 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6525 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 6526 // CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 6527 // CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6528 // CHECK6: omp.inner.for.body: 6529 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6530 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 6531 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6532 // CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2 6533 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6534 // CHECK6: omp.body.continue: 6535 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6536 // CHECK6: omp.inner.for.inc: 6537 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6538 // CHECK6-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 6539 // CHECK6-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6540 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 6541 // CHECK6: omp.inner.for.end: 6542 // CHECK6-NEXT: store i32 100, i32* [[I]], align 4 6543 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 6544 // CHECK6-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 6545 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 6546 // CHECK6-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 6547 // 
CHECK6-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 6548 // CHECK6: omp.inner.for.cond7: 6549 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 6550 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !6 6551 // CHECK6-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 6552 // CHECK6-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 6553 // CHECK6: omp.inner.for.body9: 6554 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 6555 // CHECK6-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 6556 // CHECK6-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 6557 // CHECK6-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !6 6558 // CHECK6-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !6 6559 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 6560 // CHECK6: omp.body.continue12: 6561 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 6562 // CHECK6: omp.inner.for.inc13: 6563 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 6564 // CHECK6-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 6565 // CHECK6-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 6566 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP7:![0-9]+]] 6567 // CHECK6: omp.inner.for.end15: 6568 // CHECK6-NEXT: store i32 100, i32* [[I6]], align 4 6569 // CHECK6-NEXT: ret void 6570 // 6571 // 6572 // CHECK6-LABEL: define {{[^@]+}}@main 6573 // CHECK6-SAME: () #[[ATTR1:[0-9]+]] { 6574 // CHECK6-NEXT: entry: 6575 // CHECK6-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 6576 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4 6577 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6578 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6579 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6580 // CHECK6-NEXT: [[I:%.*]] = alloca i32, 
align 4 6581 // CHECK6-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6582 // CHECK6-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6583 // CHECK6-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6584 // CHECK6-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6585 // CHECK6-NEXT: [[I6:%.*]] = alloca i32, align 4 6586 // CHECK6-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 6587 // CHECK6-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 6588 // CHECK6-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 6589 // CHECK6-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 6590 // CHECK6-NEXT: [[I20:%.*]] = alloca i32, align 4 6591 // CHECK6-NEXT: store i32 0, i32* [[RETVAL]], align 4 6592 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6593 // CHECK6-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 6594 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6595 // CHECK6-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 6596 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6597 // CHECK6: omp.inner.for.cond: 6598 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6599 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !9 6600 // CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 6601 // CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6602 // CHECK6: omp.inner.for.body: 6603 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6604 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 6605 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6606 // CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !9 6607 // CHECK6-NEXT: call void @_Z3fn4v(), !llvm.access.group !9 6608 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6609 // CHECK6: omp.body.continue: 6610 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6611 // CHECK6: omp.inner.for.inc: 6612 // CHECK6-NEXT: 
[[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6613 // CHECK6-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 6614 // CHECK6-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6615 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] 6616 // CHECK6: omp.inner.for.end: 6617 // CHECK6-NEXT: store i32 100, i32* [[I]], align 4 6618 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 6619 // CHECK6-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 6620 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 6621 // CHECK6-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 6622 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 6623 // CHECK6: omp.inner.for.cond7: 6624 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !12 6625 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !12 6626 // CHECK6-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 6627 // CHECK6-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 6628 // CHECK6: omp.inner.for.body9: 6629 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !12 6630 // CHECK6-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 6631 // CHECK6-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 6632 // CHECK6-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !12 6633 // CHECK6-NEXT: call void @_Z3fn5v(), !llvm.access.group !12 6634 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 6635 // CHECK6: omp.body.continue12: 6636 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 6637 // CHECK6: omp.inner.for.inc13: 6638 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !12 6639 // CHECK6-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 6640 // CHECK6-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !12 
6641 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP13:![0-9]+]] 6642 // CHECK6: omp.inner.for.end15: 6643 // CHECK6-NEXT: store i32 100, i32* [[I6]], align 4 6644 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 6645 // CHECK6-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 6646 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 6647 // CHECK6-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 6648 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 6649 // CHECK6: omp.inner.for.cond21: 6650 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !15 6651 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !15 6652 // CHECK6-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 6653 // CHECK6-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 6654 // CHECK6: omp.inner.for.body23: 6655 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !15 6656 // CHECK6-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 6657 // CHECK6-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 6658 // CHECK6-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !15 6659 // CHECK6-NEXT: call void @_Z3fn6v(), !llvm.access.group !15 6660 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 6661 // CHECK6: omp.body.continue26: 6662 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 6663 // CHECK6: omp.inner.for.inc27: 6664 // CHECK6-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !15 6665 // CHECK6-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 6666 // CHECK6-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !15 6667 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP16:![0-9]+]] 6668 // CHECK6: omp.inner.for.end29: 6669 // CHECK6-NEXT: store i32 100, i32* [[I20]], align 4 6670 // 
CHECK6-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 6671 // CHECK6-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 6672 // CHECK6-NEXT: ret i32 [[CALL]] 6673 // 6674 // 6675 // CHECK6-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 6676 // CHECK6-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 6677 // CHECK6-NEXT: entry: 6678 // CHECK6-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 6679 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4 6680 // CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6681 // CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6682 // CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6683 // CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4 6684 // CHECK6-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6685 // CHECK6-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6686 // CHECK6-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6687 // CHECK6-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6688 // CHECK6-NEXT: [[I6:%.*]] = alloca i32, align 4 6689 // CHECK6-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 6690 // CHECK6-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 6691 // CHECK6-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 6692 // CHECK6-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 6693 // CHECK6-NEXT: [[I20:%.*]] = alloca i32, align 4 6694 // CHECK6-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 6695 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6696 // CHECK6-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 6697 // CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6698 // CHECK6-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 6699 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6700 // CHECK6: omp.inner.for.cond: 6701 // CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 6702 // CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18 6703 // CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 6704 
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6705 // CHECK6: omp.inner.for.body: 6706 // CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 6707 // CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 6708 // CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6709 // CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !18 6710 // CHECK6-NEXT: call void @_Z3fn1v(), !llvm.access.group !18 6711 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6712 // CHECK6: omp.body.continue: 6713 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6714 // CHECK6: omp.inner.for.inc: 6715 // CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 6716 // CHECK6-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 6717 // CHECK6-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 6718 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] 6719 // CHECK6: omp.inner.for.end: 6720 // CHECK6-NEXT: store i32 100, i32* [[I]], align 4 6721 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 6722 // CHECK6-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 6723 // CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 6724 // CHECK6-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 6725 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 6726 // CHECK6: omp.inner.for.cond7: 6727 // CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21 6728 // CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !21 6729 // CHECK6-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 6730 // CHECK6-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 6731 // CHECK6: omp.inner.for.body9: 6732 // CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21 6733 // 
CHECK6-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 6734 // CHECK6-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 6735 // CHECK6-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !21 6736 // CHECK6-NEXT: call void @_Z3fn2v(), !llvm.access.group !21 6737 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 6738 // CHECK6: omp.body.continue12: 6739 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 6740 // CHECK6: omp.inner.for.inc13: 6741 // CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21 6742 // CHECK6-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 6743 // CHECK6-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !21 6744 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP22:![0-9]+]] 6745 // CHECK6: omp.inner.for.end15: 6746 // CHECK6-NEXT: store i32 100, i32* [[I6]], align 4 6747 // CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 6748 // CHECK6-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 6749 // CHECK6-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 6750 // CHECK6-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 6751 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 6752 // CHECK6: omp.inner.for.cond21: 6753 // CHECK6-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !24 6754 // CHECK6-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !24 6755 // CHECK6-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 6756 // CHECK6-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 6757 // CHECK6: omp.inner.for.body23: 6758 // CHECK6-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !24 6759 // CHECK6-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 6760 // CHECK6-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 6761 // CHECK6-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !24 
6762 // CHECK6-NEXT: call void @_Z3fn3v(), !llvm.access.group !24 6763 // CHECK6-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 6764 // CHECK6: omp.body.continue26: 6765 // CHECK6-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 6766 // CHECK6: omp.inner.for.inc27: 6767 // CHECK6-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !24 6768 // CHECK6-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 6769 // CHECK6-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !24 6770 // CHECK6-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP25:![0-9]+]] 6771 // CHECK6: omp.inner.for.end29: 6772 // CHECK6-NEXT: store i32 100, i32* [[I20]], align 4 6773 // CHECK6-NEXT: ret i32 0 6774 // 6775 // 6776 // CHECK7-LABEL: define {{[^@]+}}@_Z9gtid_testv 6777 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] { 6778 // CHECK7-NEXT: entry: 6779 // CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4 6780 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6781 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6782 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6783 // CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4 6784 // CHECK7-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6785 // CHECK7-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6786 // CHECK7-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6787 // CHECK7-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6788 // CHECK7-NEXT: [[I6:%.*]] = alloca i32, align 4 6789 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6790 // CHECK7-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 6791 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6792 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 6793 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6794 // CHECK7: omp.inner.for.cond: 6795 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6796 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, 
!llvm.access.group !2 6797 // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 6798 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6799 // CHECK7: omp.inner.for.body: 6800 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6801 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 6802 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6803 // CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2 6804 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6805 // CHECK7: omp.body.continue: 6806 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6807 // CHECK7: omp.inner.for.inc: 6808 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6809 // CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 6810 // CHECK7-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 6811 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 6812 // CHECK7: omp.inner.for.end: 6813 // CHECK7-NEXT: store i32 100, i32* [[I]], align 4 6814 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 6815 // CHECK7-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 6816 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 6817 // CHECK7-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 6818 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 6819 // CHECK7: omp.inner.for.cond7: 6820 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 6821 // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !6 6822 // CHECK7-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 6823 // CHECK7-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 6824 // CHECK7: omp.inner.for.body9: 6825 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group 
!6 6826 // CHECK7-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 6827 // CHECK7-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 6828 // CHECK7-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !6 6829 // CHECK7-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !6 6830 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 6831 // CHECK7: omp.body.continue12: 6832 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 6833 // CHECK7: omp.inner.for.inc13: 6834 // CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 6835 // CHECK7-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 6836 // CHECK7-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 6837 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP7:![0-9]+]] 6838 // CHECK7: omp.inner.for.end15: 6839 // CHECK7-NEXT: store i32 100, i32* [[I6]], align 4 6840 // CHECK7-NEXT: ret void 6841 // 6842 // 6843 // CHECK7-LABEL: define {{[^@]+}}@main 6844 // CHECK7-SAME: () #[[ATTR1:[0-9]+]] { 6845 // CHECK7-NEXT: entry: 6846 // CHECK7-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 6847 // CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4 6848 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6849 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6850 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6851 // CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4 6852 // CHECK7-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6853 // CHECK7-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6854 // CHECK7-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6855 // CHECK7-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6856 // CHECK7-NEXT: [[I6:%.*]] = alloca i32, align 4 6857 // CHECK7-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 6858 // CHECK7-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 6859 // CHECK7-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 6860 // CHECK7-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 6861 // CHECK7-NEXT: 
[[DOTOMP_IV19:%.*]] = alloca i32, align 4 6862 // CHECK7-NEXT: [[I20:%.*]] = alloca i32, align 4 6863 // CHECK7-NEXT: store i32 0, i32* [[RETVAL]], align 4 6864 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 6865 // CHECK7-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 6866 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 6867 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 6868 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 6869 // CHECK7: omp.inner.for.cond: 6870 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6871 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !9 6872 // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 6873 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 6874 // CHECK7: omp.inner.for.body: 6875 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6876 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 6877 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 6878 // CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !9 6879 // CHECK7-NEXT: call void @_Z3fn4v(), !llvm.access.group !9 6880 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 6881 // CHECK7: omp.body.continue: 6882 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 6883 // CHECK7: omp.inner.for.inc: 6884 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6885 // CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 6886 // CHECK7-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 6887 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] 6888 // CHECK7: omp.inner.for.end: 6889 // CHECK7-NEXT: store i32 100, i32* [[I]], align 4 6890 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 6891 // CHECK7-NEXT: store i32 99, i32* 
[[DOTOMP_UB4]], align 4 6892 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 6893 // CHECK7-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 6894 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 6895 // CHECK7: omp.inner.for.cond7: 6896 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 6897 // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4 6898 // CHECK7-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 6899 // CHECK7-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 6900 // CHECK7: omp.inner.for.body9: 6901 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 6902 // CHECK7-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 6903 // CHECK7-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 6904 // CHECK7-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4 6905 // CHECK7-NEXT: call void @_Z3fn5v() 6906 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 6907 // CHECK7: omp.body.continue12: 6908 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 6909 // CHECK7: omp.inner.for.inc13: 6910 // CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 6911 // CHECK7-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 6912 // CHECK7-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4 6913 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP12:![0-9]+]] 6914 // CHECK7: omp.inner.for.end15: 6915 // CHECK7-NEXT: store i32 100, i32* [[I6]], align 4 6916 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* @Arg, align 4 6917 // CHECK7-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 6918 // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 6919 // CHECK7-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 6920 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 6921 // CHECK7-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 6922 // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 6923 // 
CHECK7-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV19]], align 4 6924 // CHECK7-NEXT: [[TMP12:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 6925 // CHECK7-NEXT: [[TOBOOL21:%.*]] = trunc i8 [[TMP12]] to i1 6926 // CHECK7-NEXT: br i1 [[TOBOOL21]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 6927 // CHECK7: omp_if.then: 6928 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND22:%.*]] 6929 // CHECK7: omp.inner.for.cond22: 6930 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !14 6931 // CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !14 6932 // CHECK7-NEXT: [[CMP23:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 6933 // CHECK7-NEXT: br i1 [[CMP23]], label [[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END30:%.*]] 6934 // CHECK7: omp.inner.for.body24: 6935 // CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !14 6936 // CHECK7-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP15]], 1 6937 // CHECK7-NEXT: [[ADD26:%.*]] = add nsw i32 0, [[MUL25]] 6938 // CHECK7-NEXT: store i32 [[ADD26]], i32* [[I20]], align 4, !llvm.access.group !14 6939 // CHECK7-NEXT: call void @_Z3fn6v(), !llvm.access.group !14 6940 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]] 6941 // CHECK7: omp.body.continue27: 6942 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]] 6943 // CHECK7: omp.inner.for.inc28: 6944 // CHECK7-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !14 6945 // CHECK7-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP16]], 1 6946 // CHECK7-NEXT: store i32 [[ADD29]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !14 6947 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP15:![0-9]+]] 6948 // CHECK7: omp.inner.for.end30: 6949 // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] 6950 // CHECK7: omp_if.else: 6951 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND31:%.*]] 6952 // CHECK7: omp.inner.for.cond31: 6953 // CHECK7-NEXT: 
[[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 6954 // CHECK7-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4 6955 // CHECK7-NEXT: [[CMP32:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 6956 // CHECK7-NEXT: br i1 [[CMP32]], label [[OMP_INNER_FOR_BODY33:%.*]], label [[OMP_INNER_FOR_END39:%.*]] 6957 // CHECK7: omp.inner.for.body33: 6958 // CHECK7-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 6959 // CHECK7-NEXT: [[MUL34:%.*]] = mul nsw i32 [[TMP19]], 1 6960 // CHECK7-NEXT: [[ADD35:%.*]] = add nsw i32 0, [[MUL34]] 6961 // CHECK7-NEXT: store i32 [[ADD35]], i32* [[I20]], align 4 6962 // CHECK7-NEXT: call void @_Z3fn6v() 6963 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE36:%.*]] 6964 // CHECK7: omp.body.continue36: 6965 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC37:%.*]] 6966 // CHECK7: omp.inner.for.inc37: 6967 // CHECK7-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 6968 // CHECK7-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP20]], 1 6969 // CHECK7-NEXT: store i32 [[ADD38]], i32* [[DOTOMP_IV19]], align 4 6970 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND31]], !llvm.loop [[LOOP17:![0-9]+]] 6971 // CHECK7: omp.inner.for.end39: 6972 // CHECK7-NEXT: br label [[OMP_IF_END]] 6973 // CHECK7: omp_if.end: 6974 // CHECK7-NEXT: store i32 100, i32* [[I20]], align 4 6975 // CHECK7-NEXT: [[TMP21:%.*]] = load i32, i32* @Arg, align 4 6976 // CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP21]]) 6977 // CHECK7-NEXT: ret i32 [[CALL]] 6978 // 6979 // 6980 // CHECK7-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 6981 // CHECK7-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 6982 // CHECK7-NEXT: entry: 6983 // CHECK7-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 6984 // CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4 6985 // CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 6986 // CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 6987 // CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 6988 // CHECK7-NEXT: 
[[I:%.*]] = alloca i32, align 4 6989 // CHECK7-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 6990 // CHECK7-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 6991 // CHECK7-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 6992 // CHECK7-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 6993 // CHECK7-NEXT: [[I6:%.*]] = alloca i32, align 4 6994 // CHECK7-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 6995 // CHECK7-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 6996 // CHECK7-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 6997 // CHECK7-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 6998 // CHECK7-NEXT: [[I20:%.*]] = alloca i32, align 4 6999 // CHECK7-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 7000 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 7001 // CHECK7-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 7002 // CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 7003 // CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 7004 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7005 // CHECK7: omp.inner.for.cond: 7006 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 7007 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18 7008 // CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 7009 // CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7010 // CHECK7: omp.inner.for.body: 7011 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 7012 // CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 7013 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 7014 // CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !18 7015 // CHECK7-NEXT: call void @_Z3fn1v(), !llvm.access.group !18 7016 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 7017 // CHECK7: omp.body.continue: 7018 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7019 // CHECK7: 
omp.inner.for.inc: 7020 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 7021 // CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 7022 // CHECK7-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 7023 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] 7024 // CHECK7: omp.inner.for.end: 7025 // CHECK7-NEXT: store i32 100, i32* [[I]], align 4 7026 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 7027 // CHECK7-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 7028 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 7029 // CHECK7-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 7030 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 7031 // CHECK7: omp.inner.for.cond7: 7032 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 7033 // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4 7034 // CHECK7-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 7035 // CHECK7-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 7036 // CHECK7: omp.inner.for.body9: 7037 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 7038 // CHECK7-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 7039 // CHECK7-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 7040 // CHECK7-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4 7041 // CHECK7-NEXT: call void @_Z3fn2v() 7042 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 7043 // CHECK7: omp.body.continue12: 7044 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 7045 // CHECK7: omp.inner.for.inc13: 7046 // CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 7047 // CHECK7-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 7048 // CHECK7-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4 7049 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP21:![0-9]+]] 7050 // CHECK7: omp.inner.for.end15: 7051 
// CHECK7-NEXT: store i32 100, i32* [[I6]], align 4 7052 // CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 7053 // CHECK7-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 7054 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 7055 // CHECK7-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 7056 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 7057 // CHECK7: omp.inner.for.cond21: 7058 // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !22 7059 // CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !22 7060 // CHECK7-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 7061 // CHECK7-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 7062 // CHECK7: omp.inner.for.body23: 7063 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !22 7064 // CHECK7-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 7065 // CHECK7-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 7066 // CHECK7-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !22 7067 // CHECK7-NEXT: call void @_Z3fn3v(), !llvm.access.group !22 7068 // CHECK7-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 7069 // CHECK7: omp.body.continue26: 7070 // CHECK7-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 7071 // CHECK7: omp.inner.for.inc27: 7072 // CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !22 7073 // CHECK7-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 7074 // CHECK7-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !22 7075 // CHECK7-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP23:![0-9]+]] 7076 // CHECK7: omp.inner.for.end29: 7077 // CHECK7-NEXT: store i32 100, i32* [[I20]], align 4 7078 // CHECK7-NEXT: ret i32 0 7079 // 7080 // 7081 // CHECK8-LABEL: define {{[^@]+}}@_Z9gtid_testv 7082 // CHECK8-SAME: () #[[ATTR0:[0-9]+]] { 
7083 // CHECK8-NEXT: entry: 7084 // CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4 7085 // CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 7086 // CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 7087 // CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7088 // CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4 7089 // CHECK8-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 7090 // CHECK8-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 7091 // CHECK8-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 7092 // CHECK8-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 7093 // CHECK8-NEXT: [[I6:%.*]] = alloca i32, align 4 7094 // CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 7095 // CHECK8-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 7096 // CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 7097 // CHECK8-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 7098 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7099 // CHECK8: omp.inner.for.cond: 7100 // CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 7101 // CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2 7102 // CHECK8-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 7103 // CHECK8-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7104 // CHECK8: omp.inner.for.body: 7105 // CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 7106 // CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 7107 // CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 7108 // CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2 7109 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 7110 // CHECK8: omp.body.continue: 7111 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7112 // CHECK8: omp.inner.for.inc: 7113 // CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 7114 // CHECK8-NEXT: [[ADD1:%.*]] = add nsw i32 
[[TMP4]], 1 7115 // CHECK8-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2 7116 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]] 7117 // CHECK8: omp.inner.for.end: 7118 // CHECK8-NEXT: store i32 100, i32* [[I]], align 4 7119 // CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 7120 // CHECK8-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 7121 // CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 7122 // CHECK8-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 7123 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 7124 // CHECK8: omp.inner.for.cond7: 7125 // CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 7126 // CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !6 7127 // CHECK8-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 7128 // CHECK8-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 7129 // CHECK8: omp.inner.for.body9: 7130 // CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 7131 // CHECK8-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 7132 // CHECK8-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 7133 // CHECK8-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !6 7134 // CHECK8-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !6 7135 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 7136 // CHECK8: omp.body.continue12: 7137 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 7138 // CHECK8: omp.inner.for.inc13: 7139 // CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 7140 // CHECK8-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 7141 // CHECK8-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !6 7142 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP7:![0-9]+]] 7143 // CHECK8: omp.inner.for.end15: 7144 
// CHECK8-NEXT: store i32 100, i32* [[I6]], align 4 7145 // CHECK8-NEXT: ret void 7146 // 7147 // 7148 // CHECK8-LABEL: define {{[^@]+}}@main 7149 // CHECK8-SAME: () #[[ATTR1:[0-9]+]] { 7150 // CHECK8-NEXT: entry: 7151 // CHECK8-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 7152 // CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4 7153 // CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 7154 // CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 7155 // CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7156 // CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4 7157 // CHECK8-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 7158 // CHECK8-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 7159 // CHECK8-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 7160 // CHECK8-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 7161 // CHECK8-NEXT: [[I6:%.*]] = alloca i32, align 4 7162 // CHECK8-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 7163 // CHECK8-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 7164 // CHECK8-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 7165 // CHECK8-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 7166 // CHECK8-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 7167 // CHECK8-NEXT: [[I20:%.*]] = alloca i32, align 4 7168 // CHECK8-NEXT: store i32 0, i32* [[RETVAL]], align 4 7169 // CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 7170 // CHECK8-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 7171 // CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 7172 // CHECK8-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 7173 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7174 // CHECK8: omp.inner.for.cond: 7175 // CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 7176 // CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !9 7177 // CHECK8-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 7178 // CHECK8-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label 
[[OMP_INNER_FOR_END:%.*]] 7179 // CHECK8: omp.inner.for.body: 7180 // CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 7181 // CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 7182 // CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 7183 // CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !9 7184 // CHECK8-NEXT: call void @_Z3fn4v(), !llvm.access.group !9 7185 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 7186 // CHECK8: omp.body.continue: 7187 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7188 // CHECK8: omp.inner.for.inc: 7189 // CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 7190 // CHECK8-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 7191 // CHECK8-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !9 7192 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] 7193 // CHECK8: omp.inner.for.end: 7194 // CHECK8-NEXT: store i32 100, i32* [[I]], align 4 7195 // CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 7196 // CHECK8-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 7197 // CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 7198 // CHECK8-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 7199 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 7200 // CHECK8: omp.inner.for.cond7: 7201 // CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 7202 // CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4 7203 // CHECK8-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 7204 // CHECK8-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 7205 // CHECK8: omp.inner.for.body9: 7206 // CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 7207 // CHECK8-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 7208 // CHECK8-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 7209 // CHECK8-NEXT: store i32 [[ADD11]], 
i32* [[I6]], align 4 7210 // CHECK8-NEXT: call void @_Z3fn5v() 7211 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 7212 // CHECK8: omp.body.continue12: 7213 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 7214 // CHECK8: omp.inner.for.inc13: 7215 // CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 7216 // CHECK8-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 7217 // CHECK8-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4 7218 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP12:![0-9]+]] 7219 // CHECK8: omp.inner.for.end15: 7220 // CHECK8-NEXT: store i32 100, i32* [[I6]], align 4 7221 // CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* @Arg, align 4 7222 // CHECK8-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 7223 // CHECK8-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 7224 // CHECK8-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 7225 // CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 7226 // CHECK8-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 7227 // CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 7228 // CHECK8-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV19]], align 4 7229 // CHECK8-NEXT: [[TMP12:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 7230 // CHECK8-NEXT: [[TOBOOL21:%.*]] = trunc i8 [[TMP12]] to i1 7231 // CHECK8-NEXT: br i1 [[TOBOOL21]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 7232 // CHECK8: omp_if.then: 7233 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND22:%.*]] 7234 // CHECK8: omp.inner.for.cond22: 7235 // CHECK8-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !14 7236 // CHECK8-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !14 7237 // CHECK8-NEXT: [[CMP23:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 7238 // CHECK8-NEXT: br i1 [[CMP23]], label [[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END30:%.*]] 7239 // CHECK8: omp.inner.for.body24: 7240 // CHECK8-NEXT: 
[[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !14 7241 // CHECK8-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP15]], 1 7242 // CHECK8-NEXT: [[ADD26:%.*]] = add nsw i32 0, [[MUL25]] 7243 // CHECK8-NEXT: store i32 [[ADD26]], i32* [[I20]], align 4, !llvm.access.group !14 7244 // CHECK8-NEXT: call void @_Z3fn6v(), !llvm.access.group !14 7245 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]] 7246 // CHECK8: omp.body.continue27: 7247 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]] 7248 // CHECK8: omp.inner.for.inc28: 7249 // CHECK8-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !14 7250 // CHECK8-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP16]], 1 7251 // CHECK8-NEXT: store i32 [[ADD29]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !14 7252 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP15:![0-9]+]] 7253 // CHECK8: omp.inner.for.end30: 7254 // CHECK8-NEXT: br label [[OMP_IF_END:%.*]] 7255 // CHECK8: omp_if.else: 7256 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND31:%.*]] 7257 // CHECK8: omp.inner.for.cond31: 7258 // CHECK8-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 7259 // CHECK8-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4 7260 // CHECK8-NEXT: [[CMP32:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 7261 // CHECK8-NEXT: br i1 [[CMP32]], label [[OMP_INNER_FOR_BODY33:%.*]], label [[OMP_INNER_FOR_END39:%.*]] 7262 // CHECK8: omp.inner.for.body33: 7263 // CHECK8-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 7264 // CHECK8-NEXT: [[MUL34:%.*]] = mul nsw i32 [[TMP19]], 1 7265 // CHECK8-NEXT: [[ADD35:%.*]] = add nsw i32 0, [[MUL34]] 7266 // CHECK8-NEXT: store i32 [[ADD35]], i32* [[I20]], align 4 7267 // CHECK8-NEXT: call void @_Z3fn6v() 7268 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE36:%.*]] 7269 // CHECK8: omp.body.continue36: 7270 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC37:%.*]] 7271 // CHECK8: omp.inner.for.inc37: 7272 // CHECK8-NEXT: 
[[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 7273 // CHECK8-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP20]], 1 7274 // CHECK8-NEXT: store i32 [[ADD38]], i32* [[DOTOMP_IV19]], align 4 7275 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND31]], !llvm.loop [[LOOP17:![0-9]+]] 7276 // CHECK8: omp.inner.for.end39: 7277 // CHECK8-NEXT: br label [[OMP_IF_END]] 7278 // CHECK8: omp_if.end: 7279 // CHECK8-NEXT: store i32 100, i32* [[I20]], align 4 7280 // CHECK8-NEXT: [[TMP21:%.*]] = load i32, i32* @Arg, align 4 7281 // CHECK8-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP21]]) 7282 // CHECK8-NEXT: ret i32 [[CALL]] 7283 // 7284 // 7285 // CHECK8-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 7286 // CHECK8-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 7287 // CHECK8-NEXT: entry: 7288 // CHECK8-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 7289 // CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4 7290 // CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 7291 // CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 7292 // CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7293 // CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4 7294 // CHECK8-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 7295 // CHECK8-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 7296 // CHECK8-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 7297 // CHECK8-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 7298 // CHECK8-NEXT: [[I6:%.*]] = alloca i32, align 4 7299 // CHECK8-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 7300 // CHECK8-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 7301 // CHECK8-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 7302 // CHECK8-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 7303 // CHECK8-NEXT: [[I20:%.*]] = alloca i32, align 4 7304 // CHECK8-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 7305 // CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 7306 // CHECK8-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 7307 // CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* 
[[DOTOMP_LB]], align 4 7308 // CHECK8-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 7309 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7310 // CHECK8: omp.inner.for.cond: 7311 // CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 7312 // CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !18 7313 // CHECK8-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 7314 // CHECK8-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7315 // CHECK8: omp.inner.for.body: 7316 // CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 7317 // CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 7318 // CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 7319 // CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !18 7320 // CHECK8-NEXT: call void @_Z3fn1v(), !llvm.access.group !18 7321 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 7322 // CHECK8: omp.body.continue: 7323 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7324 // CHECK8: omp.inner.for.inc: 7325 // CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 7326 // CHECK8-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 7327 // CHECK8-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !18 7328 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]] 7329 // CHECK8: omp.inner.for.end: 7330 // CHECK8-NEXT: store i32 100, i32* [[I]], align 4 7331 // CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 7332 // CHECK8-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 7333 // CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 7334 // CHECK8-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 7335 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 7336 // CHECK8: omp.inner.for.cond7: 7337 // CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 
4 7338 // CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4 7339 // CHECK8-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 7340 // CHECK8-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 7341 // CHECK8: omp.inner.for.body9: 7342 // CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 7343 // CHECK8-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 7344 // CHECK8-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 7345 // CHECK8-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4 7346 // CHECK8-NEXT: call void @_Z3fn2v() 7347 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 7348 // CHECK8: omp.body.continue12: 7349 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 7350 // CHECK8: omp.inner.for.inc13: 7351 // CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 7352 // CHECK8-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 7353 // CHECK8-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4 7354 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP21:![0-9]+]] 7355 // CHECK8: omp.inner.for.end15: 7356 // CHECK8-NEXT: store i32 100, i32* [[I6]], align 4 7357 // CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 7358 // CHECK8-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 7359 // CHECK8-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 7360 // CHECK8-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 7361 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 7362 // CHECK8: omp.inner.for.cond21: 7363 // CHECK8-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !22 7364 // CHECK8-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !22 7365 // CHECK8-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 7366 // CHECK8-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 7367 // CHECK8: omp.inner.for.body23: 7368 // CHECK8-NEXT: [[TMP13:%.*]] = 
load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !22 7369 // CHECK8-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 7370 // CHECK8-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 7371 // CHECK8-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !22 7372 // CHECK8-NEXT: call void @_Z3fn3v(), !llvm.access.group !22 7373 // CHECK8-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 7374 // CHECK8: omp.body.continue26: 7375 // CHECK8-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 7376 // CHECK8: omp.inner.for.inc27: 7377 // CHECK8-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !22 7378 // CHECK8-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 7379 // CHECK8-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !22 7380 // CHECK8-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP23:![0-9]+]] 7381 // CHECK8: omp.inner.for.end29: 7382 // CHECK8-NEXT: store i32 100, i32* [[I20]], align 4 7383 // CHECK8-NEXT: ret i32 0 7384 // 7385 // 7386 // CHECK9-LABEL: define {{[^@]+}}@_Z9gtid_testv 7387 // CHECK9-SAME: () #[[ATTR0:[0-9]+]] { 7388 // CHECK9-NEXT: entry: 7389 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 7390 // CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 7391 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 100) 7392 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 7393 // CHECK9-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 7394 // CHECK9-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 7395 // CHECK9: omp_offload.failed: 7396 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43() #[[ATTR2:[0-9]+]] 7397 // CHECK9-NEXT: br label 
[[OMP_OFFLOAD_CONT]] 7398 // CHECK9: omp_offload.cont: 7399 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 7400 // CHECK9-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 7401 // CHECK9-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 7402 // CHECK9-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 7403 // CHECK9: omp_offload.failed2: 7404 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48() #[[ATTR2]] 7405 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT3]] 7406 // CHECK9: omp_offload.cont3: 7407 // CHECK9-NEXT: ret void 7408 // 7409 // 7410 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43 7411 // CHECK9-SAME: () #[[ATTR1:[0-9]+]] { 7412 // CHECK9-NEXT: entry: 7413 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 7414 // CHECK9-NEXT: ret void 7415 // 7416 // 7417 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined. 
7418 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 7419 // CHECK9-NEXT: entry: 7420 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 7421 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 7422 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7423 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 7424 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 7425 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 7426 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 7427 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 7428 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 7429 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 7430 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 7431 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 7432 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 7433 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 7434 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 7435 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 7436 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 7437 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 7438 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 7439 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 7440 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 7441 // CHECK9: cond.true: 7442 // CHECK9-NEXT: br label [[COND_END:%.*]] 7443 // CHECK9: cond.false: 7444 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 7445 // CHECK9-NEXT: br label 
[[COND_END]] 7446 // CHECK9: cond.end: 7447 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 7448 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 7449 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 7450 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 7451 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7452 // CHECK9: omp.inner.for.cond: 7453 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 7454 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 7455 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 7456 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7457 // CHECK9: omp.inner.for.body: 7458 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15 7459 // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 7460 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 7461 // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 7462 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !15 7463 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7464 // CHECK9: omp.inner.for.inc: 7465 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 7466 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 7467 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 7468 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 7469 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 7470 // CHECK9: omp.inner.for.end: 7471 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 7472 // CHECK9: omp.loop.exit: 7473 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 7474 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 7475 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 7476 // CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 7477 // CHECK9: .omp.final.then: 7478 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 7479 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 7480 // CHECK9: .omp.final.done: 7481 // CHECK9-NEXT: ret void 7482 // 7483 // 7484 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..1 7485 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 7486 // CHECK9-NEXT: entry: 7487 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 7488 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 7489 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 7490 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 7491 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7492 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 7493 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 7494 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 7495 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 7496 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 7497 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 7498 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 7499 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 7500 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 7501 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 7502 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 7503 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 7504 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 7505 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 7506 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 7507 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 7508 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 7509 // CHECK9-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 7510 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 7511 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 7512 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 7513 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 7514 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 7515 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 7516 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 7517 // 
CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 7518 // CHECK9: cond.true: 7519 // CHECK9-NEXT: br label [[COND_END:%.*]] 7520 // CHECK9: cond.false: 7521 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 7522 // CHECK9-NEXT: br label [[COND_END]] 7523 // CHECK9: cond.end: 7524 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 7525 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 7526 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 7527 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 7528 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7529 // CHECK9: omp.inner.for.cond: 7530 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 7531 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19 7532 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 7533 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7534 // CHECK9: omp.inner.for.body: 7535 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 7536 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 7537 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 7538 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !19 7539 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 7540 // CHECK9: omp.body.continue: 7541 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7542 // CHECK9: omp.inner.for.inc: 7543 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 7544 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 7545 // CHECK9-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 7546 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] 7547 // CHECK9: omp.inner.for.end: 7548 // 
CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 7549 // CHECK9: omp.loop.exit: 7550 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 7551 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 7552 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 7553 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 7554 // CHECK9: .omp.final.then: 7555 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 7556 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 7557 // CHECK9: .omp.final.done: 7558 // CHECK9-NEXT: ret void 7559 // 7560 // 7561 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48 7562 // CHECK9-SAME: () #[[ATTR1]] { 7563 // CHECK9-NEXT: entry: 7564 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*)) 7565 // CHECK9-NEXT: ret void 7566 // 7567 // 7568 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..2 7569 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 7570 // CHECK9-NEXT: entry: 7571 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 7572 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 7573 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7574 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 7575 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 7576 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 7577 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 7578 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 7579 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 7580 // CHECK9-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 7581 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** 
[[DOTGLOBAL_TID__ADDR]], align 8 7582 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 7583 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 7584 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 7585 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 7586 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 7587 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 7588 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 7589 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 7590 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 7591 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 7592 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 7593 // CHECK9: cond.true: 7594 // CHECK9-NEXT: br label [[COND_END:%.*]] 7595 // CHECK9: cond.false: 7596 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 7597 // CHECK9-NEXT: br label [[COND_END]] 7598 // CHECK9: cond.end: 7599 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 7600 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 7601 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 7602 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 7603 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7604 // CHECK9: omp.inner.for.cond: 7605 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 7606 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24 7607 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 7608 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label 
[[OMP_INNER_FOR_END:%.*]] 7609 // CHECK9: omp.inner.for.body: 7610 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !24 7611 // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 7612 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24 7613 // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 7614 // CHECK9-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !24 7615 // CHECK9-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !24 7616 // CHECK9-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !24 7617 // CHECK9-NEXT: call void @.omp_outlined..3(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !24 7618 // CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !24 7619 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7620 // CHECK9: omp.inner.for.inc: 7621 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 7622 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !24 7623 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 7624 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 7625 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] 7626 // CHECK9: omp.inner.for.end: 7627 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 7628 // CHECK9: omp.loop.exit: 7629 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 7630 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 7631 // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 7632 // CHECK9-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label 
[[DOTOMP_FINAL_DONE:%.*]] 7633 // CHECK9: .omp.final.then: 7634 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 7635 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 7636 // CHECK9: .omp.final.done: 7637 // CHECK9-NEXT: ret void 7638 // 7639 // 7640 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3 7641 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 7642 // CHECK9-NEXT: entry: 7643 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 7644 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 7645 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 7646 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 7647 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7648 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 7649 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 7650 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 7651 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 7652 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 7653 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 7654 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 7655 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 7656 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 7657 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 7658 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 7659 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 7660 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 7661 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 7662 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 7663 // CHECK9-NEXT: [[CONV1:%.*]] = trunc 
i64 [[TMP1]] to i32 7664 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 7665 // CHECK9-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 7666 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 7667 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 7668 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 7669 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 7670 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 7671 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 7672 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 7673 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 7674 // CHECK9: cond.true: 7675 // CHECK9-NEXT: br label [[COND_END:%.*]] 7676 // CHECK9: cond.false: 7677 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 7678 // CHECK9-NEXT: br label [[COND_END]] 7679 // CHECK9: cond.end: 7680 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 7681 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 7682 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 7683 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 7684 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7685 // CHECK9: omp.inner.for.cond: 7686 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 7687 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27 7688 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 7689 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7690 // CHECK9: omp.inner.for.body: 7691 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 
4, !llvm.access.group !27 7692 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 7693 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 7694 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27 7695 // CHECK9-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !27 7696 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 7697 // CHECK9: omp.body.continue: 7698 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7699 // CHECK9: omp.inner.for.inc: 7700 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 7701 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 7702 // CHECK9-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 7703 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] 7704 // CHECK9: omp.inner.for.end: 7705 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 7706 // CHECK9: omp.loop.exit: 7707 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 7708 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 7709 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 7710 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 7711 // CHECK9: .omp.final.then: 7712 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 7713 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 7714 // CHECK9: .omp.final.done: 7715 // CHECK9-NEXT: ret void 7716 // 7717 // 7718 // CHECK9-LABEL: define {{[^@]+}}@main 7719 // CHECK9-SAME: () #[[ATTR3:[0-9]+]] { 7720 // CHECK9-NEXT: entry: 7721 // CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 7722 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 7723 // CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 7724 // CHECK9-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 7725 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 7726 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 
7727 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 7728 // CHECK9-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 7729 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4 7730 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 7731 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 7732 // CHECK9-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 7733 // CHECK9-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 7734 // CHECK9: omp_offload.failed: 7735 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81() #[[ATTR2]] 7736 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] 7737 // CHECK9: omp_offload.cont: 7738 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 7739 // CHECK9-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 7740 // CHECK9-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 7741 // CHECK9-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 7742 // CHECK9: omp_offload.failed2: 7743 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90() #[[ATTR2]] 7744 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT3]] 7745 // CHECK9: omp_offload.cont3: 7746 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* @Arg, align 4 7747 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 7748 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 7749 // CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 7750 // 
CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 7751 // CHECK9-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 7752 // CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 7753 // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 7754 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 7755 // CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 7756 // CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 7757 // CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 7758 // CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 7759 // CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 7760 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 7761 // CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 7762 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 7763 // CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 7764 // CHECK9: omp_offload.failed5: 7765 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99(i64 [[TMP5]]) #[[ATTR2]] 7766 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT6]] 7767 // CHECK9: omp_offload.cont6: 7768 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 7769 // CHECK9-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 7770 
// CHECK9-NEXT: ret i32 [[CALL]] 7771 // 7772 // 7773 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 7774 // CHECK9-SAME: () #[[ATTR1]] { 7775 // CHECK9-NEXT: entry: 7776 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 7777 // CHECK9-NEXT: ret void 7778 // 7779 // 7780 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..4 7781 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 7782 // CHECK9-NEXT: entry: 7783 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 7784 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 7785 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7786 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 7787 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 7788 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 7789 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 7790 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 7791 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 7792 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 7793 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 7794 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 7795 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 7796 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 7797 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 7798 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 7799 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 7800 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* 
[[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 7801 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 7802 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 7803 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 7804 // CHECK9: cond.true: 7805 // CHECK9-NEXT: br label [[COND_END:%.*]] 7806 // CHECK9: cond.false: 7807 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 7808 // CHECK9-NEXT: br label [[COND_END]] 7809 // CHECK9: cond.end: 7810 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 7811 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 7812 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 7813 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 7814 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7815 // CHECK9: omp.inner.for.cond: 7816 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 7817 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 7818 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 7819 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7820 // CHECK9: omp.inner.for.body: 7821 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !30 7822 // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 7823 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 7824 // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 7825 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !30 7826 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7827 // CHECK9: omp.inner.for.inc: 7828 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 7829 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !30 7830 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 7831 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 7832 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] 7833 // CHECK9: omp.inner.for.end: 7834 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 7835 // CHECK9: omp.loop.exit: 7836 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 7837 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 7838 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 7839 // CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 7840 // CHECK9: .omp.final.then: 7841 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 7842 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 7843 // CHECK9: .omp.final.done: 7844 // CHECK9-NEXT: ret void 7845 // 7846 // 7847 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5 7848 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 7849 // CHECK9-NEXT: entry: 7850 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 7851 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 7852 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 7853 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 7854 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7855 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 7856 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 7857 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 7858 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 7859 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 7860 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 7861 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 7862 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 7863 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 7864 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 7865 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 7866 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 7867 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 7868 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 7869 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 7870 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 7871 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 7872 // CHECK9-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 7873 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 7874 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 7875 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 7876 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 7877 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 7878 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 7879 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 7880 // 
CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 7881 // CHECK9: cond.true: 7882 // CHECK9-NEXT: br label [[COND_END:%.*]] 7883 // CHECK9: cond.false: 7884 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 7885 // CHECK9-NEXT: br label [[COND_END]] 7886 // CHECK9: cond.end: 7887 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 7888 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 7889 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 7890 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 7891 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7892 // CHECK9: omp.inner.for.cond: 7893 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 7894 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33 7895 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 7896 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7897 // CHECK9: omp.inner.for.body: 7898 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 7899 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 7900 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 7901 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33 7902 // CHECK9-NEXT: call void @_Z3fn4v(), !llvm.access.group !33 7903 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 7904 // CHECK9: omp.body.continue: 7905 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7906 // CHECK9: omp.inner.for.inc: 7907 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 7908 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 7909 // CHECK9-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 7910 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP34:![0-9]+]] 7911 // CHECK9: omp.inner.for.end: 7912 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 7913 // CHECK9: omp.loop.exit: 7914 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 7915 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 7916 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 7917 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 7918 // CHECK9: .omp.final.then: 7919 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 7920 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 7921 // CHECK9: .omp.final.done: 7922 // CHECK9-NEXT: ret void 7923 // 7924 // 7925 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90 7926 // CHECK9-SAME: () #[[ATTR1]] { 7927 // CHECK9-NEXT: entry: 7928 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..6 to void (i32*, i32*, ...)*)) 7929 // CHECK9-NEXT: ret void 7930 // 7931 // 7932 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6 7933 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 7934 // CHECK9-NEXT: entry: 7935 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 7936 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 7937 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 7938 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 7939 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 7940 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 7941 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 7942 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 7943 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 7944 // CHECK9-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 7945 // 
CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 7946 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 7947 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 7948 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 7949 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 7950 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 7951 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 7952 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 7953 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 7954 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 7955 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 7956 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 7957 // CHECK9: cond.true: 7958 // CHECK9-NEXT: br label [[COND_END:%.*]] 7959 // CHECK9: cond.false: 7960 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 7961 // CHECK9-NEXT: br label [[COND_END]] 7962 // CHECK9: cond.end: 7963 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 7964 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 7965 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 7966 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 7967 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 7968 // CHECK9: omp.inner.for.cond: 7969 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 7970 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36 7971 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 7972 // CHECK9-NEXT: br i1 
[[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 7973 // CHECK9: omp.inner.for.body: 7974 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !36 7975 // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 7976 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36 7977 // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 7978 // CHECK9-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !36 7979 // CHECK9-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !36 7980 // CHECK9-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !36 7981 // CHECK9-NEXT: call void @.omp_outlined..7(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !36 7982 // CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !36 7983 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 7984 // CHECK9: omp.inner.for.inc: 7985 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 7986 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !36 7987 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 7988 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 7989 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] 7990 // CHECK9: omp.inner.for.end: 7991 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 7992 // CHECK9: omp.loop.exit: 7993 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 7994 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 7995 // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 7996 // CHECK9-NEXT: br i1 [[TMP15]], label 
[[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 7997 // CHECK9: .omp.final.then: 7998 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 7999 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 8000 // CHECK9: .omp.final.done: 8001 // CHECK9-NEXT: ret void 8002 // 8003 // 8004 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 8005 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 8006 // CHECK9-NEXT: entry: 8007 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8008 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8009 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 8010 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 8011 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8012 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 8013 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 8014 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 8015 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8016 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8017 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 8018 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8019 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8020 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8021 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 8022 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 8023 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 8024 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8025 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 8026 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 8027 // 
CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 8028 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 8029 // CHECK9-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 8030 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8031 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8032 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8033 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 8034 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8035 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8036 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 8037 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8038 // CHECK9: cond.true: 8039 // CHECK9-NEXT: br label [[COND_END:%.*]] 8040 // CHECK9: cond.false: 8041 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8042 // CHECK9-NEXT: br label [[COND_END]] 8043 // CHECK9: cond.end: 8044 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 8045 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 8046 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 8047 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 8048 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8049 // CHECK9: omp.inner.for.cond: 8050 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 8051 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39 8052 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 8053 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8054 // CHECK9: omp.inner.for.body: 8055 // CHECK9-NEXT: [[TMP9:%.*]] = 
load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 8056 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 8057 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 8058 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39 8059 // CHECK9-NEXT: call void @_Z3fn5v(), !llvm.access.group !39 8060 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8061 // CHECK9: omp.body.continue: 8062 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8063 // CHECK9: omp.inner.for.inc: 8064 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 8065 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 8066 // CHECK9-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 8067 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] 8068 // CHECK9: omp.inner.for.end: 8069 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8070 // CHECK9: omp.loop.exit: 8071 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 8072 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8073 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 8074 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8075 // CHECK9: .omp.final.then: 8076 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 8077 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 8078 // CHECK9: .omp.final.done: 8079 // CHECK9-NEXT: ret void 8080 // 8081 // 8082 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99 8083 // CHECK9-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 8084 // CHECK9-NEXT: entry: 8085 // CHECK9-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 8086 // CHECK9-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 8087 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 8088 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]]) 8089 // CHECK9-NEXT: ret void 8090 // 8091 // 8092 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..8 8093 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 8094 // CHECK9-NEXT: entry: 8095 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8096 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8097 // CHECK9-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 8098 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8099 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 8100 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 8101 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 8102 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8103 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8104 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 8105 // CHECK9-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 8106 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8107 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8108 // CHECK9-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 8109 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 8110 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 8111 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 8112 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8113 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8114 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8115 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 8116 // CHECK9-NEXT: call void 
@__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8117 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 8118 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 8119 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8120 // CHECK9: cond.true: 8121 // CHECK9-NEXT: br label [[COND_END:%.*]] 8122 // CHECK9: cond.false: 8123 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 8124 // CHECK9-NEXT: br label [[COND_END]] 8125 // CHECK9: cond.end: 8126 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 8127 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 8128 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 8129 // CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 8130 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8131 // CHECK9: omp.inner.for.cond: 8132 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42 8133 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !42 8134 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 8135 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8136 // CHECK9: omp.inner.for.body: 8137 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !42 8138 // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 8139 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !42 8140 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 8141 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !42 8142 // CHECK9-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 8143 // CHECK9-NEXT: br i1 
[[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 8144 // CHECK9: omp_if.then: 8145 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !42 8146 // CHECK9-NEXT: br label [[OMP_IF_END:%.*]] 8147 // CHECK9: omp_if.else: 8148 // CHECK9-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !42 8149 // CHECK9-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !42 8150 // CHECK9-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !42 8151 // CHECK9-NEXT: call void @.omp_outlined..9(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !42 8152 // CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !42 8153 // CHECK9-NEXT: br label [[OMP_IF_END]] 8154 // CHECK9: omp_if.end: 8155 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8156 // CHECK9: omp.inner.for.inc: 8157 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42 8158 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !42 8159 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 8160 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42 8161 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]] 8162 // CHECK9: omp.inner.for.end: 8163 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8164 // CHECK9: omp.loop.exit: 8165 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 8166 // CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 
8167 // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 8168 // CHECK9-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8169 // CHECK9: .omp.final.then: 8170 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 8171 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 8172 // CHECK9: .omp.final.done: 8173 // CHECK9-NEXT: ret void 8174 // 8175 // 8176 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..9 8177 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 8178 // CHECK9-NEXT: entry: 8179 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8180 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8181 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 8182 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 8183 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8184 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 8185 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 8186 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 8187 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8188 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8189 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 8190 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8191 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8192 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8193 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 8194 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 8195 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 8196 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8197 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 
[[TMP0]] to i32 8198 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 8199 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 8200 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 8201 // CHECK9-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 8202 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8203 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8204 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8205 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 8206 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8207 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8208 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 8209 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8210 // CHECK9: cond.true: 8211 // CHECK9-NEXT: br label [[COND_END:%.*]] 8212 // CHECK9: cond.false: 8213 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8214 // CHECK9-NEXT: br label [[COND_END]] 8215 // CHECK9: cond.end: 8216 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 8217 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 8218 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 8219 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 8220 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8221 // CHECK9: omp.inner.for.cond: 8222 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45 8223 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !45 8224 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 8225 // CHECK9-NEXT: br i1 [[CMP2]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8226 // CHECK9: omp.inner.for.body: 8227 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45 8228 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 8229 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 8230 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !45 8231 // CHECK9-NEXT: call void @_Z3fn6v(), !llvm.access.group !45 8232 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8233 // CHECK9: omp.body.continue: 8234 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8235 // CHECK9: omp.inner.for.inc: 8236 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45 8237 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 8238 // CHECK9-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45 8239 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] 8240 // CHECK9: omp.inner.for.end: 8241 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8242 // CHECK9: omp.loop.exit: 8243 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 8244 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8245 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 8246 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8247 // CHECK9: .omp.final.then: 8248 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 8249 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 8250 // CHECK9: .omp.final.done: 8251 // CHECK9-NEXT: ret void 8252 // 8253 // 8254 // CHECK9-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 8255 // CHECK9-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 8256 // CHECK9-NEXT: entry: 8257 // CHECK9-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 8258 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 8259 // CHECK9-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 8260 // 
CHECK9-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 8261 // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 8262 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 8263 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 8264 // CHECK9-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 8265 // CHECK9-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 8266 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 8267 // CHECK9-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 8268 // CHECK9-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 8269 // CHECK9-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 8270 // CHECK9: omp_offload.failed: 8271 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59() #[[ATTR2]] 8272 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] 8273 // CHECK9: omp_offload.cont: 8274 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 8275 // CHECK9-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 8276 // CHECK9-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 8277 // CHECK9-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 8278 // CHECK9: omp_offload.failed2: 8279 // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65() #[[ATTR2]] 8280 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT3]] 8281 // CHECK9: omp_offload.cont3: 8282 // CHECK9-NEXT: [[TMP4:%.*]] = load 
i32, i32* [[ARG_ADDR]], align 4 8283 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 8284 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 8285 // CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 8286 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 8287 // CHECK9-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 8288 // CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 8289 // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 8290 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 8291 // CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 8292 // CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 8293 // CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 8294 // CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 8295 // CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 8296 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 8297 // CHECK9-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 8298 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 8299 // CHECK9-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 8300 // CHECK9: omp_offload.failed5: 8301 // CHECK9-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71(i64 [[TMP5]]) #[[ATTR2]] 8302 // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT6]] 8303 // CHECK9: omp_offload.cont6: 8304 // CHECK9-NEXT: ret i32 0 8305 // 8306 // 8307 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59 8308 // CHECK9-SAME: () #[[ATTR1]] { 8309 // CHECK9-NEXT: entry: 8310 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 8311 // CHECK9-NEXT: ret void 8312 // 8313 // 8314 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 8315 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 8316 // CHECK9-NEXT: entry: 8317 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8318 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8319 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8320 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 8321 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 8322 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 8323 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8324 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8325 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 8326 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8327 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8328 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 8329 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 8330 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8331 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8332 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8333 // 
CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 8334 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8335 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 8336 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 8337 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8338 // CHECK9: cond.true: 8339 // CHECK9-NEXT: br label [[COND_END:%.*]] 8340 // CHECK9: cond.false: 8341 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 8342 // CHECK9-NEXT: br label [[COND_END]] 8343 // CHECK9: cond.end: 8344 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 8345 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 8346 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 8347 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 8348 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8349 // CHECK9: omp.inner.for.cond: 8350 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48 8351 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !48 8352 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 8353 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8354 // CHECK9: omp.inner.for.body: 8355 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !48 8356 // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 8357 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !48 8358 // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 8359 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !48 8360 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8361 // CHECK9: omp.inner.for.inc: 8362 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48 8363 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !48 8364 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 8365 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48 8366 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]] 8367 // CHECK9: omp.inner.for.end: 8368 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8369 // CHECK9: omp.loop.exit: 8370 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 8371 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8372 // CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 8373 // CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8374 // CHECK9: .omp.final.then: 8375 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 8376 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 8377 // CHECK9: .omp.final.done: 8378 // CHECK9-NEXT: ret void 8379 // 8380 // 8381 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11 8382 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 8383 // CHECK9-NEXT: entry: 8384 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8385 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8386 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 8387 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 8388 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8389 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 8390 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 8391 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 8392 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8393 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8394 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 8395 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8396 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8397 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8398 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 8399 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 8400 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 8401 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8402 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 8403 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 8404 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 8405 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 8406 // CHECK9-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 8407 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8408 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8409 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8410 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 8411 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8412 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8413 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 8414 // 
CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8415 // CHECK9: cond.true: 8416 // CHECK9-NEXT: br label [[COND_END:%.*]] 8417 // CHECK9: cond.false: 8418 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8419 // CHECK9-NEXT: br label [[COND_END]] 8420 // CHECK9: cond.end: 8421 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 8422 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 8423 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 8424 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 8425 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8426 // CHECK9: omp.inner.for.cond: 8427 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 8428 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !51 8429 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 8430 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8431 // CHECK9: omp.inner.for.body: 8432 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 8433 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 8434 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 8435 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !51 8436 // CHECK9-NEXT: call void @_Z3fn1v(), !llvm.access.group !51 8437 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8438 // CHECK9: omp.body.continue: 8439 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8440 // CHECK9: omp.inner.for.inc: 8441 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 8442 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 8443 // CHECK9-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 8444 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP52:![0-9]+]] 8445 // CHECK9: omp.inner.for.end: 8446 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8447 // CHECK9: omp.loop.exit: 8448 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 8449 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8450 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 8451 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8452 // CHECK9: .omp.final.then: 8453 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 8454 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 8455 // CHECK9: .omp.final.done: 8456 // CHECK9-NEXT: ret void 8457 // 8458 // 8459 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65 8460 // CHECK9-SAME: () #[[ATTR1]] { 8461 // CHECK9-NEXT: entry: 8462 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..12 to void (i32*, i32*, ...)*)) 8463 // CHECK9-NEXT: ret void 8464 // 8465 // 8466 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..12 8467 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 8468 // CHECK9-NEXT: entry: 8469 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8470 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8471 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8472 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 8473 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 8474 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 8475 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8476 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8477 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 8478 // CHECK9-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, 
align 4 8479 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8480 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8481 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 8482 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 8483 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8484 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8485 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8486 // CHECK9-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 8487 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8488 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 8489 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 8490 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8491 // CHECK9: cond.true: 8492 // CHECK9-NEXT: br label [[COND_END:%.*]] 8493 // CHECK9: cond.false: 8494 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 8495 // CHECK9-NEXT: br label [[COND_END]] 8496 // CHECK9: cond.end: 8497 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 8498 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 8499 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 8500 // CHECK9-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 8501 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8502 // CHECK9: omp.inner.for.cond: 8503 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 8504 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !54 8505 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 8506 // 
CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8507 // CHECK9: omp.inner.for.body: 8508 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !54 8509 // CHECK9-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 8510 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !54 8511 // CHECK9-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 8512 // CHECK9-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !54 8513 // CHECK9-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !54 8514 // CHECK9-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !54 8515 // CHECK9-NEXT: call void @.omp_outlined..13(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !54 8516 // CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !54 8517 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8518 // CHECK9: omp.inner.for.inc: 8519 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 8520 // CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !54 8521 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 8522 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 8523 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]] 8524 // CHECK9: omp.inner.for.end: 8525 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8526 // CHECK9: omp.loop.exit: 8527 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 8528 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8529 // CHECK9-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 8530 // CHECK9-NEXT: br 
i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8531 // CHECK9: .omp.final.then: 8532 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 8533 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 8534 // CHECK9: .omp.final.done: 8535 // CHECK9-NEXT: ret void 8536 // 8537 // 8538 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..13 8539 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 8540 // CHECK9-NEXT: entry: 8541 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8542 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8543 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 8544 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 8545 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8546 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 8547 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 8548 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 8549 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8550 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8551 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 8552 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8553 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8554 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8555 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 8556 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 8557 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 8558 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8559 // CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 8560 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* 
[[DOTPREVIOUS_UB__ADDR]], align 8 8561 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 8562 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 8563 // CHECK9-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 8564 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8565 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8566 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8567 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 8568 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8569 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8570 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 8571 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8572 // CHECK9: cond.true: 8573 // CHECK9-NEXT: br label [[COND_END:%.*]] 8574 // CHECK9: cond.false: 8575 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8576 // CHECK9-NEXT: br label [[COND_END]] 8577 // CHECK9: cond.end: 8578 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 8579 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 8580 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 8581 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 8582 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8583 // CHECK9: omp.inner.for.cond: 8584 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57 8585 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !57 8586 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 8587 // CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8588 // CHECK9: 
omp.inner.for.body: 8589 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57 8590 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 8591 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 8592 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !57 8593 // CHECK9-NEXT: call void @_Z3fn2v(), !llvm.access.group !57 8594 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8595 // CHECK9: omp.body.continue: 8596 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8597 // CHECK9: omp.inner.for.inc: 8598 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57 8599 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 8600 // CHECK9-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57 8601 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]] 8602 // CHECK9: omp.inner.for.end: 8603 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8604 // CHECK9: omp.loop.exit: 8605 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 8606 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8607 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 8608 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8609 // CHECK9: .omp.final.then: 8610 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 8611 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 8612 // CHECK9: .omp.final.done: 8613 // CHECK9-NEXT: ret void 8614 // 8615 // 8616 // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71 8617 // CHECK9-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 8618 // CHECK9-NEXT: entry: 8619 // CHECK9-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 8620 // CHECK9-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 8621 // CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 8622 // CHECK9-NEXT: call void 
(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]]) 8623 // CHECK9-NEXT: ret void 8624 // 8625 // 8626 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..14 8627 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 8628 // CHECK9-NEXT: entry: 8629 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8630 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8631 // CHECK9-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 8632 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8633 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 8634 // CHECK9-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 8635 // CHECK9-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 8636 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8637 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8638 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 8639 // CHECK9-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 8640 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8641 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8642 // CHECK9-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 8643 // CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 8644 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 8645 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 8646 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8647 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8648 // CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8649 // CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 
8650 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8651 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 8652 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 8653 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8654 // CHECK9: cond.true: 8655 // CHECK9-NEXT: br label [[COND_END:%.*]] 8656 // CHECK9: cond.false: 8657 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 8658 // CHECK9-NEXT: br label [[COND_END]] 8659 // CHECK9: cond.end: 8660 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 8661 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 8662 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 8663 // CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 8664 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8665 // CHECK9: omp.inner.for.cond: 8666 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60 8667 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !60 8668 // CHECK9-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 8669 // CHECK9-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8670 // CHECK9: omp.inner.for.body: 8671 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !60 8672 // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 8673 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !60 8674 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 8675 // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !60 8676 // CHECK9-NEXT: [[TOBOOL:%.*]] = icmp ne i32 
[[TMP12]], 0 8677 // CHECK9-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 8678 // CHECK9: omp_if.then: 8679 // CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !60 8680 // CHECK9-NEXT: br label [[OMP_IF_END:%.*]] 8681 // CHECK9: omp_if.else: 8682 // CHECK9-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !60 8683 // CHECK9-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !60 8684 // CHECK9-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !60 8685 // CHECK9-NEXT: call void @.omp_outlined..15(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !60 8686 // CHECK9-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !60 8687 // CHECK9-NEXT: br label [[OMP_IF_END]] 8688 // CHECK9: omp_if.end: 8689 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8690 // CHECK9: omp.inner.for.inc: 8691 // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60 8692 // CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !60 8693 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 8694 // CHECK9-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60 8695 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP61:![0-9]+]] 8696 // CHECK9: omp.inner.for.end: 8697 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8698 // CHECK9: omp.loop.exit: 8699 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 8700 // CHECK9-NEXT: [[TMP16:%.*]] = 
load i32, i32* [[DOTOMP_IS_LAST]], align 4 8701 // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 8702 // CHECK9-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8703 // CHECK9: .omp.final.then: 8704 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 8705 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 8706 // CHECK9: .omp.final.done: 8707 // CHECK9-NEXT: ret void 8708 // 8709 // 8710 // CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15 8711 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 8712 // CHECK9-NEXT: entry: 8713 // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8714 // CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8715 // CHECK9-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 8716 // CHECK9-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 8717 // CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8718 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 8719 // CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 8720 // CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 8721 // CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8722 // CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8723 // CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4 8724 // CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8725 // CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8726 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8727 // CHECK9-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 8728 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 8729 // CHECK9-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 8730 // CHECK9-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8731 // 
CHECK9-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 8732 // CHECK9-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 8733 // CHECK9-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 8734 // CHECK9-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 8735 // CHECK9-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 8736 // CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8737 // CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8738 // CHECK9-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8739 // CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 8740 // CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8741 // CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8742 // CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 8743 // CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8744 // CHECK9: cond.true: 8745 // CHECK9-NEXT: br label [[COND_END:%.*]] 8746 // CHECK9: cond.false: 8747 // CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8748 // CHECK9-NEXT: br label [[COND_END]] 8749 // CHECK9: cond.end: 8750 // CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 8751 // CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 8752 // CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 8753 // CHECK9-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 8754 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8755 // CHECK9: omp.inner.for.cond: 8756 // CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63 8757 // CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !63 8758 // CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 8759 // CHECK9-NEXT: br i1 
[[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8760 // CHECK9: omp.inner.for.body: 8761 // CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63 8762 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 8763 // CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 8764 // CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !63 8765 // CHECK9-NEXT: call void @_Z3fn3v(), !llvm.access.group !63 8766 // CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8767 // CHECK9: omp.body.continue: 8768 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8769 // CHECK9: omp.inner.for.inc: 8770 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63 8771 // CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 8772 // CHECK9-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63 8773 // CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP64:![0-9]+]] 8774 // CHECK9: omp.inner.for.end: 8775 // CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8776 // CHECK9: omp.loop.exit: 8777 // CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 8778 // CHECK9-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8779 // CHECK9-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 8780 // CHECK9-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8781 // CHECK9: .omp.final.then: 8782 // CHECK9-NEXT: store i32 100, i32* [[I]], align 4 8783 // CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]] 8784 // CHECK9: .omp.final.done: 8785 // CHECK9-NEXT: ret void 8786 // 8787 // 8788 // CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 8789 // CHECK9-SAME: () #[[ATTR5:[0-9]+]] { 8790 // CHECK9-NEXT: entry: 8791 // CHECK9-NEXT: call void @__tgt_register_requires(i64 1) 8792 // CHECK9-NEXT: ret void 8793 // 8794 // 8795 // CHECK10-LABEL: define {{[^@]+}}@_Z9gtid_testv 8796 // 
CHECK10-SAME: () #[[ATTR0:[0-9]+]] { 8797 // CHECK10-NEXT: entry: 8798 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 8799 // CHECK10-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 8800 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 100) 8801 // CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 8802 // CHECK10-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 8803 // CHECK10-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 8804 // CHECK10: omp_offload.failed: 8805 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43() #[[ATTR2:[0-9]+]] 8806 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] 8807 // CHECK10: omp_offload.cont: 8808 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 8809 // CHECK10-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 8810 // CHECK10-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 8811 // CHECK10-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 8812 // CHECK10: omp_offload.failed2: 8813 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48() #[[ATTR2]] 8814 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT3]] 8815 // CHECK10: omp_offload.cont3: 8816 // CHECK10-NEXT: ret void 8817 // 8818 // 8819 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43 8820 // CHECK10-SAME: () #[[ATTR1:[0-9]+]] { 8821 // CHECK10-NEXT: entry: 8822 // 
CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 8823 // CHECK10-NEXT: ret void 8824 // 8825 // 8826 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined. 8827 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 8828 // CHECK10-NEXT: entry: 8829 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8830 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8831 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8832 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 8833 // CHECK10-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 8834 // CHECK10-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 8835 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8836 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8837 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 8838 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8839 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8840 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 8841 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 8842 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8843 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8844 // CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8845 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 8846 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8847 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 8848 // 
CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 8849 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8850 // CHECK10: cond.true: 8851 // CHECK10-NEXT: br label [[COND_END:%.*]] 8852 // CHECK10: cond.false: 8853 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 8854 // CHECK10-NEXT: br label [[COND_END]] 8855 // CHECK10: cond.end: 8856 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 8857 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 8858 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 8859 // CHECK10-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 8860 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8861 // CHECK10: omp.inner.for.cond: 8862 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 8863 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 8864 // CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 8865 // CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8866 // CHECK10: omp.inner.for.body: 8867 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15 8868 // CHECK10-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 8869 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 8870 // CHECK10-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 8871 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !15 8872 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8873 // CHECK10: omp.inner.for.inc: 8874 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 8875 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 8876 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 8877 // CHECK10-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 8878 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 8879 // CHECK10: omp.inner.for.end: 8880 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8881 // CHECK10: omp.loop.exit: 8882 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 8883 // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8884 // CHECK10-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 8885 // CHECK10-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8886 // CHECK10: .omp.final.then: 8887 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 8888 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 8889 // CHECK10: .omp.final.done: 8890 // CHECK10-NEXT: ret void 8891 // 8892 // 8893 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..1 8894 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 8895 // CHECK10-NEXT: entry: 8896 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8897 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8898 // CHECK10-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 8899 // CHECK10-NEXT: 
[[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 8900 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8901 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 8902 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 8903 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 8904 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8905 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8906 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 8907 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8908 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8909 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8910 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 8911 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 8912 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 8913 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 8914 // CHECK10-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 8915 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 8916 // CHECK10-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 8917 // CHECK10-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 8918 // CHECK10-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 8919 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8920 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8921 // CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8922 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 8923 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8924 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 
8925 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 8926 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 8927 // CHECK10: cond.true: 8928 // CHECK10-NEXT: br label [[COND_END:%.*]] 8929 // CHECK10: cond.false: 8930 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 8931 // CHECK10-NEXT: br label [[COND_END]] 8932 // CHECK10: cond.end: 8933 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 8934 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 8935 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 8936 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 8937 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 8938 // CHECK10: omp.inner.for.cond: 8939 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 8940 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19 8941 // CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 8942 // CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 8943 // CHECK10: omp.inner.for.body: 8944 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 8945 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 8946 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 8947 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !19 8948 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 8949 // CHECK10: omp.body.continue: 8950 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 8951 // CHECK10: omp.inner.for.inc: 8952 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 8953 // CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 8954 // CHECK10-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 8955 // CHECK10-NEXT: br label 
[[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] 8956 // CHECK10: omp.inner.for.end: 8957 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 8958 // CHECK10: omp.loop.exit: 8959 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 8960 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 8961 // CHECK10-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 8962 // CHECK10-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 8963 // CHECK10: .omp.final.then: 8964 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 8965 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 8966 // CHECK10: .omp.final.done: 8967 // CHECK10-NEXT: ret void 8968 // 8969 // 8970 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48 8971 // CHECK10-SAME: () #[[ATTR1]] { 8972 // CHECK10-NEXT: entry: 8973 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*)) 8974 // CHECK10-NEXT: ret void 8975 // 8976 // 8977 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..2 8978 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 8979 // CHECK10-NEXT: entry: 8980 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 8981 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 8982 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 8983 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 8984 // CHECK10-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 8985 // CHECK10-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 8986 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 8987 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 8988 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 8989 
// CHECK10-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 8990 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 8991 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 8992 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 8993 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 8994 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 8995 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 8996 // CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 8997 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 8998 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 8999 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9000 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 9001 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9002 // CHECK10: cond.true: 9003 // CHECK10-NEXT: br label [[COND_END:%.*]] 9004 // CHECK10: cond.false: 9005 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9006 // CHECK10-NEXT: br label [[COND_END]] 9007 // CHECK10: cond.end: 9008 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 9009 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 9010 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 9011 // CHECK10-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 9012 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9013 // CHECK10: omp.inner.for.cond: 9014 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 9015 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group 
!24 9016 // CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 9017 // CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9018 // CHECK10: omp.inner.for.body: 9019 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !24 9020 // CHECK10-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 9021 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24 9022 // CHECK10-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 9023 // CHECK10-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !24 9024 // CHECK10-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !24 9025 // CHECK10-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !24 9026 // CHECK10-NEXT: call void @.omp_outlined..3(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !24 9027 // CHECK10-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !24 9028 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9029 // CHECK10: omp.inner.for.inc: 9030 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 9031 // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !24 9032 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 9033 // CHECK10-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 9034 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] 9035 // CHECK10: omp.inner.for.end: 9036 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9037 // CHECK10: omp.loop.exit: 9038 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 9039 // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* 
[[DOTOMP_IS_LAST]], align 4 9040 // CHECK10-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 9041 // CHECK10-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9042 // CHECK10: .omp.final.then: 9043 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 9044 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 9045 // CHECK10: .omp.final.done: 9046 // CHECK10-NEXT: ret void 9047 // 9048 // 9049 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..3 9050 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 9051 // CHECK10-NEXT: entry: 9052 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 9053 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 9054 // CHECK10-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 9055 // CHECK10-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 9056 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9057 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9058 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 9059 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 9060 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9061 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9062 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 9063 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 9064 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 9065 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 9066 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9067 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 9068 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 9069 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], 
align 8 9070 // CHECK10-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 9071 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9072 // CHECK10-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 9073 // CHECK10-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 9074 // CHECK10-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 9075 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 9076 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9077 // CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9078 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 9079 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 9080 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9081 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 9082 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9083 // CHECK10: cond.true: 9084 // CHECK10-NEXT: br label [[COND_END:%.*]] 9085 // CHECK10: cond.false: 9086 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9087 // CHECK10-NEXT: br label [[COND_END]] 9088 // CHECK10: cond.end: 9089 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 9090 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 9091 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 9092 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 9093 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9094 // CHECK10: omp.inner.for.cond: 9095 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 9096 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27 9097 // CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i32 
[[TMP7]], [[TMP8]] 9098 // CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9099 // CHECK10: omp.inner.for.body: 9100 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 9101 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 9102 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 9103 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27 9104 // CHECK10-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !27 9105 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 9106 // CHECK10: omp.body.continue: 9107 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9108 // CHECK10: omp.inner.for.inc: 9109 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 9110 // CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 9111 // CHECK10-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 9112 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] 9113 // CHECK10: omp.inner.for.end: 9114 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9115 // CHECK10: omp.loop.exit: 9116 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 9117 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 9118 // CHECK10-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 9119 // CHECK10-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9120 // CHECK10: .omp.final.then: 9121 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 9122 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 9123 // CHECK10: .omp.final.done: 9124 // CHECK10-NEXT: ret void 9125 // 9126 // 9127 // CHECK10-LABEL: define {{[^@]+}}@main 9128 // CHECK10-SAME: () #[[ATTR3:[0-9]+]] { 9129 // CHECK10-NEXT: entry: 9130 // CHECK10-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 9131 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9132 // 
CHECK10-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 9133 // CHECK10-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 9134 // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 9135 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 9136 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 9137 // CHECK10-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 9138 // CHECK10-NEXT: store i32 0, i32* [[RETVAL]], align 4 9139 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 9140 // CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 9141 // CHECK10-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 9142 // CHECK10-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 9143 // CHECK10: omp_offload.failed: 9144 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81() #[[ATTR2]] 9145 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] 9146 // CHECK10: omp_offload.cont: 9147 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 9148 // CHECK10-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 9149 // CHECK10-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 9150 // CHECK10-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 9151 // CHECK10: omp_offload.failed2: 9152 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90() #[[ATTR2]] 9153 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT3]] 9154 // CHECK10: omp_offload.cont3: 9155 // 
CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* @Arg, align 4 9156 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 9157 // CHECK10-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 9158 // CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 9159 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 9160 // CHECK10-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 9161 // CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 9162 // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 9163 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 9164 // CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 9165 // CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 9166 // CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 9167 // CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 9168 // CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 9169 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 9170 // CHECK10-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 9171 // CHECK10-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 9172 // CHECK10-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 9173 // CHECK10: omp_offload.failed5: 9174 // CHECK10-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99(i64 [[TMP5]]) #[[ATTR2]] 9175 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT6]] 9176 // CHECK10: omp_offload.cont6: 9177 // CHECK10-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 9178 // CHECK10-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 9179 // CHECK10-NEXT: ret i32 [[CALL]] 9180 // 9181 // 9182 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 9183 // CHECK10-SAME: () #[[ATTR1]] { 9184 // CHECK10-NEXT: entry: 9185 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 9186 // CHECK10-NEXT: ret void 9187 // 9188 // 9189 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..4 9190 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 9191 // CHECK10-NEXT: entry: 9192 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 9193 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 9194 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9195 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9196 // CHECK10-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 9197 // CHECK10-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 9198 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9199 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9200 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 9201 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 9202 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 9203 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 9204 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 9205 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], 
align 4 9206 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9207 // CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9208 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 9209 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 9210 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9211 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 9212 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9213 // CHECK10: cond.true: 9214 // CHECK10-NEXT: br label [[COND_END:%.*]] 9215 // CHECK10: cond.false: 9216 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9217 // CHECK10-NEXT: br label [[COND_END]] 9218 // CHECK10: cond.end: 9219 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 9220 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 9221 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 9222 // CHECK10-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 9223 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9224 // CHECK10: omp.inner.for.cond: 9225 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 9226 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 9227 // CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 9228 // CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9229 // CHECK10: omp.inner.for.body: 9230 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !30 9231 // CHECK10-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 9232 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* 
[[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 9233 // CHECK10-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 9234 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !30 9235 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9236 // CHECK10: omp.inner.for.inc: 9237 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 9238 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !30 9239 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 9240 // CHECK10-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 9241 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] 9242 // CHECK10: omp.inner.for.end: 9243 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9244 // CHECK10: omp.loop.exit: 9245 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 9246 // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 9247 // CHECK10-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 9248 // CHECK10-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9249 // CHECK10: .omp.final.then: 9250 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 9251 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 9252 // CHECK10: .omp.final.done: 9253 // CHECK10-NEXT: ret void 9254 // 9255 // 9256 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 9257 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 9258 // CHECK10-NEXT: entry: 9259 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca 
i32*, align 8 9260 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 9261 // CHECK10-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 9262 // CHECK10-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 9263 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9264 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9265 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 9266 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 9267 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9268 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9269 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 9270 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 9271 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 9272 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 9273 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9274 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 9275 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 9276 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 9277 // CHECK10-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 9278 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9279 // CHECK10-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 9280 // CHECK10-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 9281 // CHECK10-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 9282 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 9283 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9284 // CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9285 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 9286 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* 
[[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 9287 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9288 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 9289 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9290 // CHECK10: cond.true: 9291 // CHECK10-NEXT: br label [[COND_END:%.*]] 9292 // CHECK10: cond.false: 9293 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9294 // CHECK10-NEXT: br label [[COND_END]] 9295 // CHECK10: cond.end: 9296 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 9297 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 9298 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 9299 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 9300 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9301 // CHECK10: omp.inner.for.cond: 9302 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 9303 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33 9304 // CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 9305 // CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9306 // CHECK10: omp.inner.for.body: 9307 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 9308 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 9309 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 9310 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33 9311 // CHECK10-NEXT: call void @_Z3fn4v(), !llvm.access.group !33 9312 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 9313 // CHECK10: omp.body.continue: 9314 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9315 // CHECK10: omp.inner.for.inc: 9316 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* 
[[DOTOMP_IV]], align 4, !llvm.access.group !33 9317 // CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 9318 // CHECK10-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 9319 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] 9320 // CHECK10: omp.inner.for.end: 9321 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9322 // CHECK10: omp.loop.exit: 9323 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 9324 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 9325 // CHECK10-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 9326 // CHECK10-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9327 // CHECK10: .omp.final.then: 9328 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 9329 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 9330 // CHECK10: .omp.final.done: 9331 // CHECK10-NEXT: ret void 9332 // 9333 // 9334 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90 9335 // CHECK10-SAME: () #[[ATTR1]] { 9336 // CHECK10-NEXT: entry: 9337 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..6 to void (i32*, i32*, ...)*)) 9338 // CHECK10-NEXT: ret void 9339 // 9340 // 9341 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6 9342 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 9343 // CHECK10-NEXT: entry: 9344 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 9345 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 9346 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9347 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9348 // CHECK10-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 9349 // CHECK10-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 9350 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9351 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9352 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 9353 // CHECK10-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 9354 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 9355 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 9356 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 9357 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 9358 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 9359 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9360 // CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9361 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 9362 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 9363 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9364 // 
CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 9365 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9366 // CHECK10: cond.true: 9367 // CHECK10-NEXT: br label [[COND_END:%.*]] 9368 // CHECK10: cond.false: 9369 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9370 // CHECK10-NEXT: br label [[COND_END]] 9371 // CHECK10: cond.end: 9372 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 9373 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 9374 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 9375 // CHECK10-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 9376 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9377 // CHECK10: omp.inner.for.cond: 9378 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 9379 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36 9380 // CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 9381 // CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9382 // CHECK10: omp.inner.for.body: 9383 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !36 9384 // CHECK10-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 9385 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36 9386 // CHECK10-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 9387 // CHECK10-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !36 9388 // CHECK10-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !36 9389 // CHECK10-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !36 9390 // CHECK10-NEXT: call void @.omp_outlined..7(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 
[[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !36 9391 // CHECK10-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !36 9392 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9393 // CHECK10: omp.inner.for.inc: 9394 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 9395 // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !36 9396 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 9397 // CHECK10-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 9398 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP37:![0-9]+]] 9399 // CHECK10: omp.inner.for.end: 9400 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9401 // CHECK10: omp.loop.exit: 9402 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 9403 // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 9404 // CHECK10-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 9405 // CHECK10-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9406 // CHECK10: .omp.final.then: 9407 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 9408 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 9409 // CHECK10: .omp.final.done: 9410 // CHECK10-NEXT: ret void 9411 // 9412 // 9413 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 9414 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 9415 // CHECK10-NEXT: entry: 9416 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 9417 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 9418 // CHECK10-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 9419 // CHECK10-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 9420 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9421 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9422 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 9423 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 9424 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9425 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9426 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 9427 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 9428 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 9429 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 9430 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9431 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 9432 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 9433 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 9434 // CHECK10-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 9435 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9436 // CHECK10-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 9437 // CHECK10-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 9438 // CHECK10-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 9439 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 9440 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9441 // CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9442 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 9443 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 9444 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9445 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 
[[TMP4]], 99 9446 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9447 // CHECK10: cond.true: 9448 // CHECK10-NEXT: br label [[COND_END:%.*]] 9449 // CHECK10: cond.false: 9450 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9451 // CHECK10-NEXT: br label [[COND_END]] 9452 // CHECK10: cond.end: 9453 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 9454 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 9455 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 9456 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 9457 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9458 // CHECK10: omp.inner.for.cond: 9459 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 9460 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !39 9461 // CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 9462 // CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9463 // CHECK10: omp.inner.for.body: 9464 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 9465 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 9466 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 9467 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !39 9468 // CHECK10-NEXT: call void @_Z3fn5v(), !llvm.access.group !39 9469 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 9470 // CHECK10: omp.body.continue: 9471 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9472 // CHECK10: omp.inner.for.inc: 9473 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 9474 // CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 9475 // CHECK10-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 9476 // CHECK10-NEXT: 
br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] 9477 // CHECK10: omp.inner.for.end: 9478 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9479 // CHECK10: omp.loop.exit: 9480 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 9481 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 9482 // CHECK10-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 9483 // CHECK10-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9484 // CHECK10: .omp.final.then: 9485 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 9486 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 9487 // CHECK10: .omp.final.done: 9488 // CHECK10-NEXT: ret void 9489 // 9490 // 9491 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99 9492 // CHECK10-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 9493 // CHECK10-NEXT: entry: 9494 // CHECK10-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 9495 // CHECK10-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 9496 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 9497 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]]) 9498 // CHECK10-NEXT: ret void 9499 // 9500 // 9501 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..8 9502 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 9503 // CHECK10-NEXT: entry: 9504 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 9505 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 9506 // CHECK10-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 9507 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9508 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9509 // CHECK10-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 9510 // CHECK10-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 9511 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9512 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9513 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 9514 // CHECK10-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 9515 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 9516 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 9517 // CHECK10-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 9518 // CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 9519 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 9520 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 9521 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 9522 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9523 // CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9524 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 9525 // CHECK10-NEXT: call void 
@__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 9526 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9527 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 9528 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9529 // CHECK10: cond.true: 9530 // CHECK10-NEXT: br label [[COND_END:%.*]] 9531 // CHECK10: cond.false: 9532 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9533 // CHECK10-NEXT: br label [[COND_END]] 9534 // CHECK10: cond.end: 9535 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 9536 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 9537 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 9538 // CHECK10-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 9539 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9540 // CHECK10: omp.inner.for.cond: 9541 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42 9542 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !42 9543 // CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 9544 // CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9545 // CHECK10: omp.inner.for.body: 9546 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !42 9547 // CHECK10-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 9548 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !42 9549 // CHECK10-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 9550 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !42 9551 // CHECK10-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 
9552 // CHECK10-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 9553 // CHECK10: omp_if.then: 9554 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !42 9555 // CHECK10-NEXT: br label [[OMP_IF_END:%.*]] 9556 // CHECK10: omp_if.else: 9557 // CHECK10-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !42 9558 // CHECK10-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !42 9559 // CHECK10-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !42 9560 // CHECK10-NEXT: call void @.omp_outlined..9(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !42 9561 // CHECK10-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !42 9562 // CHECK10-NEXT: br label [[OMP_IF_END]] 9563 // CHECK10: omp_if.end: 9564 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9565 // CHECK10: omp.inner.for.inc: 9566 // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42 9567 // CHECK10-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !42 9568 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 9569 // CHECK10-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42 9570 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP43:![0-9]+]] 9571 // CHECK10: omp.inner.for.end: 9572 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9573 // CHECK10: omp.loop.exit: 9574 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 9575 // CHECK10-NEXT: 
[[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 9576 // CHECK10-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 9577 // CHECK10-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9578 // CHECK10: .omp.final.then: 9579 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 9580 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 9581 // CHECK10: .omp.final.done: 9582 // CHECK10-NEXT: ret void 9583 // 9584 // 9585 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..9 9586 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 9587 // CHECK10-NEXT: entry: 9588 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 9589 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 9590 // CHECK10-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 9591 // CHECK10-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 9592 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9593 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9594 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 9595 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 9596 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9597 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9598 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 9599 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 9600 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 9601 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 9602 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9603 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 9604 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 9605 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, 
i64* [[DOTPREVIOUS_LB__ADDR]], align 8 9606 // CHECK10-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 9607 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9608 // CHECK10-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 9609 // CHECK10-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 9610 // CHECK10-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 9611 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 9612 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9613 // CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9614 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 9615 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 9616 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9617 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 9618 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9619 // CHECK10: cond.true: 9620 // CHECK10-NEXT: br label [[COND_END:%.*]] 9621 // CHECK10: cond.false: 9622 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9623 // CHECK10-NEXT: br label [[COND_END]] 9624 // CHECK10: cond.end: 9625 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 9626 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 9627 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 9628 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 9629 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9630 // CHECK10: omp.inner.for.cond: 9631 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45 9632 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !45 9633 // CHECK10-NEXT: 
[[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 9634 // CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9635 // CHECK10: omp.inner.for.body: 9636 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45 9637 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 9638 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 9639 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !45 9640 // CHECK10-NEXT: call void @_Z3fn6v(), !llvm.access.group !45 9641 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 9642 // CHECK10: omp.body.continue: 9643 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9644 // CHECK10: omp.inner.for.inc: 9645 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45 9646 // CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 9647 // CHECK10-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !45 9648 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP46:![0-9]+]] 9649 // CHECK10: omp.inner.for.end: 9650 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9651 // CHECK10: omp.loop.exit: 9652 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 9653 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 9654 // CHECK10-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 9655 // CHECK10-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9656 // CHECK10: .omp.final.then: 9657 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 9658 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 9659 // CHECK10: .omp.final.done: 9660 // CHECK10-NEXT: ret void 9661 // 9662 // 9663 // CHECK10-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 9664 // CHECK10-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 9665 // CHECK10-NEXT: entry: 9666 // CHECK10-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 9667 // 
CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9668 // CHECK10-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 9669 // CHECK10-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 9670 // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 9671 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 9672 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 9673 // CHECK10-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 9674 // CHECK10-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 9675 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 9676 // CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 9677 // CHECK10-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 9678 // CHECK10-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 9679 // CHECK10: omp_offload.failed: 9680 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59() #[[ATTR2]] 9681 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] 9682 // CHECK10: omp_offload.cont: 9683 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 9684 // CHECK10-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 9685 // CHECK10-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 9686 // CHECK10-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 9687 // CHECK10: omp_offload.failed2: 9688 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65() 
#[[ATTR2]] 9689 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT3]] 9690 // CHECK10: omp_offload.cont3: 9691 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 9692 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 9693 // CHECK10-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 9694 // CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 9695 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 9696 // CHECK10-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 9697 // CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 9698 // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 9699 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 9700 // CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 9701 // CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 9702 // CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 9703 // CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 9704 // CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 9705 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 9706 // CHECK10-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 9707 // CHECK10-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 9708 // CHECK10-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], 
label [[OMP_OFFLOAD_CONT6:%.*]] 9709 // CHECK10: omp_offload.failed5: 9710 // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71(i64 [[TMP5]]) #[[ATTR2]] 9711 // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT6]] 9712 // CHECK10: omp_offload.cont6: 9713 // CHECK10-NEXT: ret i32 0 9714 // 9715 // 9716 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59 9717 // CHECK10-SAME: () #[[ATTR1]] { 9718 // CHECK10-NEXT: entry: 9719 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*)) 9720 // CHECK10-NEXT: ret void 9721 // 9722 // 9723 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 9724 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 9725 // CHECK10-NEXT: entry: 9726 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 9727 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 9728 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9729 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9730 // CHECK10-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 9731 // CHECK10-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 9732 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9733 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9734 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 9735 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 9736 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 9737 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 9738 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 9739 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 9740 // CHECK10-NEXT: store i32 0, 
i32* [[DOTOMP_IS_LAST]], align 4 9741 // CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9742 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 9743 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 9744 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9745 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 9746 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9747 // CHECK10: cond.true: 9748 // CHECK10-NEXT: br label [[COND_END:%.*]] 9749 // CHECK10: cond.false: 9750 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9751 // CHECK10-NEXT: br label [[COND_END]] 9752 // CHECK10: cond.end: 9753 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 9754 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 9755 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 9756 // CHECK10-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 9757 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9758 // CHECK10: omp.inner.for.cond: 9759 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48 9760 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !48 9761 // CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 9762 // CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9763 // CHECK10: omp.inner.for.body: 9764 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !48 9765 // CHECK10-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 9766 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !48 
9767 // CHECK10-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 9768 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !48 9769 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9770 // CHECK10: omp.inner.for.inc: 9771 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48 9772 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !48 9773 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 9774 // CHECK10-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !48 9775 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP49:![0-9]+]] 9776 // CHECK10: omp.inner.for.end: 9777 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9778 // CHECK10: omp.loop.exit: 9779 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 9780 // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 9781 // CHECK10-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 9782 // CHECK10-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9783 // CHECK10: .omp.final.then: 9784 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 9785 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 9786 // CHECK10: .omp.final.done: 9787 // CHECK10-NEXT: ret void 9788 // 9789 // 9790 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..11 9791 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 9792 // CHECK10-NEXT: entry: 9793 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 9794 // CHECK10-NEXT: 
[[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 9795 // CHECK10-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 9796 // CHECK10-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 9797 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9798 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9799 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 9800 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 9801 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9802 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9803 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 9804 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 9805 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 9806 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 9807 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9808 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 9809 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 9810 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 9811 // CHECK10-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 9812 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9813 // CHECK10-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 9814 // CHECK10-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 9815 // CHECK10-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 9816 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 9817 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9818 // CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9819 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 9820 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* 
[[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 9821 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9822 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 9823 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9824 // CHECK10: cond.true: 9825 // CHECK10-NEXT: br label [[COND_END:%.*]] 9826 // CHECK10: cond.false: 9827 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9828 // CHECK10-NEXT: br label [[COND_END]] 9829 // CHECK10: cond.end: 9830 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 9831 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 9832 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 9833 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 9834 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9835 // CHECK10: omp.inner.for.cond: 9836 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 9837 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !51 9838 // CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 9839 // CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9840 // CHECK10: omp.inner.for.body: 9841 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 9842 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 9843 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 9844 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !51 9845 // CHECK10-NEXT: call void @_Z3fn1v(), !llvm.access.group !51 9846 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 9847 // CHECK10: omp.body.continue: 9848 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9849 // CHECK10: omp.inner.for.inc: 9850 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, 
!llvm.access.group !51 9851 // CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 9852 // CHECK10-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 9853 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]] 9854 // CHECK10: omp.inner.for.end: 9855 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9856 // CHECK10: omp.loop.exit: 9857 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 9858 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 9859 // CHECK10-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 9860 // CHECK10-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9861 // CHECK10: .omp.final.then: 9862 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 9863 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 9864 // CHECK10: .omp.final.done: 9865 // CHECK10-NEXT: ret void 9866 // 9867 // 9868 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65 9869 // CHECK10-SAME: () #[[ATTR1]] { 9870 // CHECK10-NEXT: entry: 9871 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..12 to void (i32*, i32*, ...)*)) 9872 // CHECK10-NEXT: ret void 9873 // 9874 // 9875 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..12 9876 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 9877 // CHECK10-NEXT: entry: 9878 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 9879 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 9880 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9881 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9882 // CHECK10-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 9883 // CHECK10-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 9884 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9885 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9886 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 9887 // CHECK10-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 9888 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 9889 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 9890 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 9891 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 9892 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 9893 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9894 // CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9895 // CHECK10-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 9896 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 9897 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9898 // 
CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 9899 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9900 // CHECK10: cond.true: 9901 // CHECK10-NEXT: br label [[COND_END:%.*]] 9902 // CHECK10: cond.false: 9903 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 9904 // CHECK10-NEXT: br label [[COND_END]] 9905 // CHECK10: cond.end: 9906 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 9907 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 9908 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 9909 // CHECK10-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 9910 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9911 // CHECK10: omp.inner.for.cond: 9912 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 9913 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !54 9914 // CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 9915 // CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9916 // CHECK10: omp.inner.for.body: 9917 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !54 9918 // CHECK10-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 9919 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !54 9920 // CHECK10-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 9921 // CHECK10-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !54 9922 // CHECK10-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !54 9923 // CHECK10-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !54 9924 // CHECK10-NEXT: call void @.omp_outlined..13(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 
[[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !54 9925 // CHECK10-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !54 9926 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 9927 // CHECK10: omp.inner.for.inc: 9928 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 9929 // CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !54 9930 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 9931 // CHECK10-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 9932 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]] 9933 // CHECK10: omp.inner.for.end: 9934 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 9935 // CHECK10: omp.loop.exit: 9936 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 9937 // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 9938 // CHECK10-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 9939 // CHECK10-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 9940 // CHECK10: .omp.final.then: 9941 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 9942 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 9943 // CHECK10: .omp.final.done: 9944 // CHECK10-NEXT: ret void 9945 // 9946 // 9947 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..13 9948 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 9949 // CHECK10-NEXT: entry: 9950 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 9951 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 9952 // CHECK10-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 9953 // CHECK10-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, 
align 8 9954 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 9955 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 9956 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 9957 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 9958 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 9959 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 9960 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 9961 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 9962 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 9963 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 9964 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9965 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 9966 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 9967 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 9968 // CHECK10-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 9969 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 9970 // CHECK10-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 9971 // CHECK10-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 9972 // CHECK10-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 9973 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 9974 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 9975 // CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 9976 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 9977 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 9978 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9979 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 
[[TMP4]], 99 9980 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 9981 // CHECK10: cond.true: 9982 // CHECK10-NEXT: br label [[COND_END:%.*]] 9983 // CHECK10: cond.false: 9984 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 9985 // CHECK10-NEXT: br label [[COND_END]] 9986 // CHECK10: cond.end: 9987 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 9988 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 9989 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 9990 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 9991 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 9992 // CHECK10: omp.inner.for.cond: 9993 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57 9994 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !57 9995 // CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 9996 // CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 9997 // CHECK10: omp.inner.for.body: 9998 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57 9999 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 10000 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 10001 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !57 10002 // CHECK10-NEXT: call void @_Z3fn2v(), !llvm.access.group !57 10003 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 10004 // CHECK10: omp.body.continue: 10005 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10006 // CHECK10: omp.inner.for.inc: 10007 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57 10008 // CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 10009 // CHECK10-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !57 10010 // 
CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]] 10011 // CHECK10: omp.inner.for.end: 10012 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10013 // CHECK10: omp.loop.exit: 10014 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 10015 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10016 // CHECK10-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 10017 // CHECK10-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10018 // CHECK10: .omp.final.then: 10019 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 10020 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 10021 // CHECK10: .omp.final.done: 10022 // CHECK10-NEXT: ret void 10023 // 10024 // 10025 // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71 10026 // CHECK10-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 10027 // CHECK10-NEXT: entry: 10028 // CHECK10-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 10029 // CHECK10-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 10030 // CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 10031 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV]]) 10032 // CHECK10-NEXT: ret void 10033 // 10034 // 10035 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..14 10036 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 10037 // CHECK10-NEXT: entry: 10038 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 10039 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 10040 // CHECK10-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 10041 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10042 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 10043 // CHECK10-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 10044 // CHECK10-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 10045 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10046 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10047 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 10048 // CHECK10-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 10049 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 10050 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 10051 // CHECK10-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 10052 // CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 10053 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 10054 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 10055 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10056 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10057 // CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10058 // CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 
10059 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10060 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10061 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 10062 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10063 // CHECK10: cond.true: 10064 // CHECK10-NEXT: br label [[COND_END:%.*]] 10065 // CHECK10: cond.false: 10066 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10067 // CHECK10-NEXT: br label [[COND_END]] 10068 // CHECK10: cond.end: 10069 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 10070 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 10071 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 10072 // CHECK10-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 10073 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10074 // CHECK10: omp.inner.for.cond: 10075 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60 10076 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !60 10077 // CHECK10-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 10078 // CHECK10-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10079 // CHECK10: omp.inner.for.body: 10080 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !60 10081 // CHECK10-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 10082 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !60 10083 // CHECK10-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 10084 // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !60 10085 
// CHECK10-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 10086 // CHECK10-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 10087 // CHECK10: omp_if.then: 10088 // CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !60 10089 // CHECK10-NEXT: br label [[OMP_IF_END:%.*]] 10090 // CHECK10: omp_if.else: 10091 // CHECK10-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !60 10092 // CHECK10-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !60 10093 // CHECK10-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !60 10094 // CHECK10-NEXT: call void @.omp_outlined..15(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !60 10095 // CHECK10-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !60 10096 // CHECK10-NEXT: br label [[OMP_IF_END]] 10097 // CHECK10: omp_if.end: 10098 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10099 // CHECK10: omp.inner.for.inc: 10100 // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60 10101 // CHECK10-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !60 10102 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 10103 // CHECK10-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !60 10104 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP61:![0-9]+]] 10105 // CHECK10: omp.inner.for.end: 10106 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10107 // CHECK10: omp.loop.exit: 10108 // CHECK10-NEXT: call void 
@__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 10109 // CHECK10-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10110 // CHECK10-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 10111 // CHECK10-NEXT: br i1 [[TMP17]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10112 // CHECK10: .omp.final.then: 10113 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 10114 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 10115 // CHECK10: .omp.final.done: 10116 // CHECK10-NEXT: ret void 10117 // 10118 // 10119 // CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..15 10120 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 10121 // CHECK10-NEXT: entry: 10122 // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 10123 // CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 10124 // CHECK10-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 10125 // CHECK10-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 10126 // CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10127 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 10128 // CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 10129 // CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 10130 // CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10131 // CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10132 // CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4 10133 // CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 10134 // CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 10135 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 10136 // CHECK10-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 10137 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], 
align 4 10138 // CHECK10-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 10139 // CHECK10-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 10140 // CHECK10-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 10141 // CHECK10-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 10142 // CHECK10-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 10143 // CHECK10-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 10144 // CHECK10-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 10145 // CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10146 // CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10147 // CHECK10-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10148 // CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 10149 // CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10150 // CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10151 // CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 10152 // CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10153 // CHECK10: cond.true: 10154 // CHECK10-NEXT: br label [[COND_END:%.*]] 10155 // CHECK10: cond.false: 10156 // CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10157 // CHECK10-NEXT: br label [[COND_END]] 10158 // CHECK10: cond.end: 10159 // CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 10160 // CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 10161 // CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 10162 // CHECK10-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 10163 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10164 // CHECK10: omp.inner.for.cond: 10165 // CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 
4, !llvm.access.group !63 10166 // CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !63 10167 // CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 10168 // CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10169 // CHECK10: omp.inner.for.body: 10170 // CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63 10171 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 10172 // CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 10173 // CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !63 10174 // CHECK10-NEXT: call void @_Z3fn3v(), !llvm.access.group !63 10175 // CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 10176 // CHECK10: omp.body.continue: 10177 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10178 // CHECK10: omp.inner.for.inc: 10179 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63 10180 // CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 10181 // CHECK10-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !63 10182 // CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP64:![0-9]+]] 10183 // CHECK10: omp.inner.for.end: 10184 // CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10185 // CHECK10: omp.loop.exit: 10186 // CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 10187 // CHECK10-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10188 // CHECK10-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 10189 // CHECK10-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10190 // CHECK10: .omp.final.then: 10191 // CHECK10-NEXT: store i32 100, i32* [[I]], align 4 10192 // CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]] 10193 // CHECK10: .omp.final.done: 10194 // CHECK10-NEXT: ret void 10195 // 10196 // 10197 // CHECK10-LABEL: define 
{{[^@]+}}@.omp_offloading.requires_reg 10198 // CHECK10-SAME: () #[[ATTR5:[0-9]+]] { 10199 // CHECK10-NEXT: entry: 10200 // CHECK10-NEXT: call void @__tgt_register_requires(i64 1) 10201 // CHECK10-NEXT: ret void 10202 // 10203 // 10204 // CHECK11-LABEL: define {{[^@]+}}@_Z9gtid_testv 10205 // CHECK11-SAME: () #[[ATTR0:[0-9]+]] { 10206 // CHECK11-NEXT: entry: 10207 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 10208 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 10209 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 100) 10210 // CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 10211 // CHECK11-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 10212 // CHECK11-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 10213 // CHECK11: omp_offload.failed: 10214 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43() #[[ATTR2:[0-9]+]] 10215 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] 10216 // CHECK11: omp_offload.cont: 10217 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 10218 // CHECK11-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 10219 // CHECK11-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 10220 // CHECK11-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 10221 // CHECK11: omp_offload.failed2: 10222 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48() #[[ATTR2]] 10223 // 
CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT3]] 10224 // CHECK11: omp_offload.cont3: 10225 // CHECK11-NEXT: ret void 10226 // 10227 // 10228 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43 10229 // CHECK11-SAME: () #[[ATTR1:[0-9]+]] { 10230 // CHECK11-NEXT: entry: 10231 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 10232 // CHECK11-NEXT: ret void 10233 // 10234 // 10235 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined. 10236 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 10237 // CHECK11-NEXT: entry: 10238 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 10239 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 10240 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10241 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 10242 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 10243 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 10244 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10245 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10246 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 10247 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 10248 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 10249 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 10250 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 10251 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10252 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10253 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10254 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, 
i32* [[TMP0]], align 4 10255 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10256 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10257 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 10258 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10259 // CHECK11: cond.true: 10260 // CHECK11-NEXT: br label [[COND_END:%.*]] 10261 // CHECK11: cond.false: 10262 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10263 // CHECK11-NEXT: br label [[COND_END]] 10264 // CHECK11: cond.end: 10265 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 10266 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 10267 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 10268 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 10269 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10270 // CHECK11: omp.inner.for.cond: 10271 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 10272 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 10273 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 10274 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10275 // CHECK11: omp.inner.for.body: 10276 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15 10277 // CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 10278 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 10279 // CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 10280 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, 
...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !15 10281 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10282 // CHECK11: omp.inner.for.inc: 10283 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 10284 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 10285 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 10286 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 10287 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 10288 // CHECK11: omp.inner.for.end: 10289 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10290 // CHECK11: omp.loop.exit: 10291 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 10292 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10293 // CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 10294 // CHECK11-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10295 // CHECK11: .omp.final.then: 10296 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 10297 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 10298 // CHECK11: .omp.final.done: 10299 // CHECK11-NEXT: ret void 10300 // 10301 // 10302 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..1 10303 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 10304 // CHECK11-NEXT: entry: 10305 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 10306 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 10307 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 
10308 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 10309 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10310 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 10311 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 10312 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 10313 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10314 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10315 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 10316 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 10317 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 10318 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 10319 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 10320 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 10321 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 10322 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 10323 // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 10324 // CHECK11-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 10325 // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 10326 // CHECK11-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 10327 // CHECK11-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 10328 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10329 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10330 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10331 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 10332 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10333 // CHECK11-NEXT: 
[[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10334 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 10335 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10336 // CHECK11: cond.true: 10337 // CHECK11-NEXT: br label [[COND_END:%.*]] 10338 // CHECK11: cond.false: 10339 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10340 // CHECK11-NEXT: br label [[COND_END]] 10341 // CHECK11: cond.end: 10342 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 10343 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 10344 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 10345 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 10346 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10347 // CHECK11: omp.inner.for.cond: 10348 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 10349 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19 10350 // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 10351 // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10352 // CHECK11: omp.inner.for.body: 10353 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 10354 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 10355 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 10356 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !19 10357 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 10358 // CHECK11: omp.body.continue: 10359 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10360 // CHECK11: omp.inner.for.inc: 10361 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 10362 // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 10363 // CHECK11-NEXT: store i32 [[ADD3]], i32* 
[[DOTOMP_IV]], align 4, !llvm.access.group !19 10364 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] 10365 // CHECK11: omp.inner.for.end: 10366 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10367 // CHECK11: omp.loop.exit: 10368 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 10369 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10370 // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 10371 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10372 // CHECK11: .omp.final.then: 10373 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 10374 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 10375 // CHECK11: .omp.final.done: 10376 // CHECK11-NEXT: ret void 10377 // 10378 // 10379 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48 10380 // CHECK11-SAME: () #[[ATTR1]] { 10381 // CHECK11-NEXT: entry: 10382 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*)) 10383 // CHECK11-NEXT: ret void 10384 // 10385 // 10386 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..2 10387 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 10388 // CHECK11-NEXT: entry: 10389 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 10390 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 10391 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10392 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 10393 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 10394 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 10395 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10396 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10397 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 10398 // CHECK11-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 10399 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 10400 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 10401 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 10402 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 10403 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10404 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10405 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10406 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 10407 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10408 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], 
align 4 10409 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 10410 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10411 // CHECK11: cond.true: 10412 // CHECK11-NEXT: br label [[COND_END:%.*]] 10413 // CHECK11: cond.false: 10414 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10415 // CHECK11-NEXT: br label [[COND_END]] 10416 // CHECK11: cond.end: 10417 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 10418 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 10419 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 10420 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 10421 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10422 // CHECK11: omp.inner.for.cond: 10423 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 10424 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24 10425 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 10426 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10427 // CHECK11: omp.inner.for.body: 10428 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !24 10429 // CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 10430 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24 10431 // CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 10432 // CHECK11-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !24 10433 // CHECK11-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !24 10434 // CHECK11-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !24 10435 // CHECK11-NEXT: call void @.omp_outlined..3(i32* 
[[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !24 10436 // CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !24 10437 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10438 // CHECK11: omp.inner.for.inc: 10439 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 10440 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !24 10441 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 10442 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 10443 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] 10444 // CHECK11: omp.inner.for.end: 10445 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10446 // CHECK11: omp.loop.exit: 10447 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 10448 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10449 // CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 10450 // CHECK11-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10451 // CHECK11: .omp.final.then: 10452 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 10453 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 10454 // CHECK11: .omp.final.done: 10455 // CHECK11-NEXT: ret void 10456 // 10457 // 10458 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3 10459 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 10460 // CHECK11-NEXT: entry: 10461 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 10462 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 10463 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, 
align 8 10464 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 10465 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10466 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 10467 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 10468 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 10469 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10470 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10471 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 10472 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 10473 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 10474 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 10475 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 10476 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 10477 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 10478 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 10479 // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 10480 // CHECK11-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 10481 // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 10482 // CHECK11-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 10483 // CHECK11-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 10484 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10485 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10486 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10487 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 10488 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10489 // CHECK11-NEXT: 
[[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10490 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 10491 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10492 // CHECK11: cond.true: 10493 // CHECK11-NEXT: br label [[COND_END:%.*]] 10494 // CHECK11: cond.false: 10495 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10496 // CHECK11-NEXT: br label [[COND_END]] 10497 // CHECK11: cond.end: 10498 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 10499 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 10500 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 10501 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 10502 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10503 // CHECK11: omp.inner.for.cond: 10504 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 10505 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27 10506 // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 10507 // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10508 // CHECK11: omp.inner.for.body: 10509 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 10510 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 10511 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 10512 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27 10513 // CHECK11-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !27 10514 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 10515 // CHECK11: omp.body.continue: 10516 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10517 // CHECK11: omp.inner.for.inc: 10518 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 10519 // CHECK11-NEXT: [[ADD3:%.*]] = 
add nsw i32 [[TMP10]], 1 10520 // CHECK11-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 10521 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] 10522 // CHECK11: omp.inner.for.end: 10523 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10524 // CHECK11: omp.loop.exit: 10525 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 10526 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10527 // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 10528 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10529 // CHECK11: .omp.final.then: 10530 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 10531 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 10532 // CHECK11: .omp.final.done: 10533 // CHECK11-NEXT: ret void 10534 // 10535 // 10536 // CHECK11-LABEL: define {{[^@]+}}@main 10537 // CHECK11-SAME: () #[[ATTR3:[0-9]+]] { 10538 // CHECK11-NEXT: entry: 10539 // CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 10540 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 10541 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 10542 // CHECK11-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 10543 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 10544 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 10545 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 10546 // CHECK11-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 10547 // CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4 10548 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 10549 // CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 
0, i32 1) 10550 // CHECK11-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 10551 // CHECK11-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 10552 // CHECK11: omp_offload.failed: 10553 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81() #[[ATTR2]] 10554 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] 10555 // CHECK11: omp_offload.cont: 10556 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 10557 // CHECK11-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 10558 // CHECK11-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 10559 // CHECK11-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 10560 // CHECK11: omp_offload.failed2: 10561 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90() #[[ATTR2]] 10562 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT3]] 10563 // CHECK11: omp_offload.cont3: 10564 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* @Arg, align 4 10565 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 10566 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 10567 // CHECK11-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 10568 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 10569 // CHECK11-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 10570 // CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 10571 // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 10572 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 10573 // CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 10574 // CHECK11-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 10575 // CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 8 10576 // CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 10577 // CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 10578 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 10579 // CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 10580 // CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 10581 // CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 10582 // CHECK11: omp_offload.failed5: 10583 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99(i64 [[TMP5]]) #[[ATTR2]] 10584 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT6]] 10585 // CHECK11: omp_offload.cont6: 10586 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 10587 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 10588 // CHECK11-NEXT: ret i32 [[CALL]] 10589 // 10590 // 10591 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 10592 // CHECK11-SAME: () #[[ATTR1]] { 10593 // CHECK11-NEXT: entry: 10594 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 10595 // CHECK11-NEXT: ret void 10596 // 10597 // 10598 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..4 10599 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 10600 // CHECK11-NEXT: entry: 10601 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 10602 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 10603 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10604 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 10605 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 10606 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 10607 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10608 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10609 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 10610 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 10611 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 10612 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 10613 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 10614 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10615 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10616 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10617 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 10618 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10619 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10620 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 
10621 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10622 // CHECK11: cond.true: 10623 // CHECK11-NEXT: br label [[COND_END:%.*]] 10624 // CHECK11: cond.false: 10625 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10626 // CHECK11-NEXT: br label [[COND_END]] 10627 // CHECK11: cond.end: 10628 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 10629 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 10630 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 10631 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 10632 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10633 // CHECK11: omp.inner.for.cond: 10634 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 10635 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 10636 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 10637 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10638 // CHECK11: omp.inner.for.body: 10639 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !30 10640 // CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 10641 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 10642 // CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 10643 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !30 10644 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10645 // CHECK11: omp.inner.for.inc: 10646 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 10647 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !30 10648 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 10649 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 10650 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] 10651 // CHECK11: omp.inner.for.end: 10652 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10653 // CHECK11: omp.loop.exit: 10654 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 10655 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10656 // CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 10657 // CHECK11-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10658 // CHECK11: .omp.final.then: 10659 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 10660 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 10661 // CHECK11: .omp.final.done: 10662 // CHECK11-NEXT: ret void 10663 // 10664 // 10665 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5 10666 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 10667 // CHECK11-NEXT: entry: 10668 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 10669 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 10670 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 10671 // 
CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 10672 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10673 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 10674 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 10675 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 10676 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10677 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10678 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 10679 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 10680 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 10681 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 10682 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 10683 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 10684 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 10685 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 10686 // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 10687 // CHECK11-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 10688 // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 10689 // CHECK11-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 10690 // CHECK11-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 10691 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10692 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10693 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10694 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 10695 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10696 // CHECK11-NEXT: [[TMP4:%.*]] = load 
i32, i32* [[DOTOMP_UB]], align 4 10697 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 10698 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10699 // CHECK11: cond.true: 10700 // CHECK11-NEXT: br label [[COND_END:%.*]] 10701 // CHECK11: cond.false: 10702 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10703 // CHECK11-NEXT: br label [[COND_END]] 10704 // CHECK11: cond.end: 10705 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 10706 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 10707 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 10708 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 10709 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10710 // CHECK11: omp.inner.for.cond: 10711 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 10712 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33 10713 // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 10714 // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10715 // CHECK11: omp.inner.for.body: 10716 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 10717 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 10718 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 10719 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33 10720 // CHECK11-NEXT: call void @_Z3fn4v(), !llvm.access.group !33 10721 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 10722 // CHECK11: omp.body.continue: 10723 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10724 // CHECK11: omp.inner.for.inc: 10725 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 10726 // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 
10727 // CHECK11-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 10728 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] 10729 // CHECK11: omp.inner.for.end: 10730 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10731 // CHECK11: omp.loop.exit: 10732 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 10733 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10734 // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 10735 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10736 // CHECK11: .omp.final.then: 10737 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 10738 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 10739 // CHECK11: .omp.final.done: 10740 // CHECK11-NEXT: ret void 10741 // 10742 // 10743 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90 10744 // CHECK11-SAME: () #[[ATTR1]] { 10745 // CHECK11-NEXT: entry: 10746 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..6 to void (i32*, i32*, ...)*)) 10747 // CHECK11-NEXT: ret void 10748 // 10749 // 10750 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6 10751 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 10752 // CHECK11-NEXT: entry: 10753 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 10754 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 10755 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10756 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 10757 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 10758 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 10759 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10760 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10761 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 10762 // CHECK11-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 10763 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 10764 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 10765 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 10766 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 10767 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10768 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10769 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10770 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 10771 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10772 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], 
align 4 10773 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 10774 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10775 // CHECK11: cond.true: 10776 // CHECK11-NEXT: br label [[COND_END:%.*]] 10777 // CHECK11: cond.false: 10778 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10779 // CHECK11-NEXT: br label [[COND_END]] 10780 // CHECK11: cond.end: 10781 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 10782 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 10783 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 10784 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 10785 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10786 // CHECK11: omp.inner.for.cond: 10787 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 10788 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10789 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 10790 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10791 // CHECK11: omp.inner.for.body: 10792 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 10793 // CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 10794 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10795 // CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 10796 // CHECK11-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 10797 // CHECK11-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10798 // CHECK11-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 10799 // CHECK11-NEXT: call void @.omp_outlined..7(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]] 10800 // CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], 
i32 [[TMP1]]) 10801 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10802 // CHECK11: omp.inner.for.inc: 10803 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 10804 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 10805 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 10806 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 10807 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] 10808 // CHECK11: omp.inner.for.end: 10809 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10810 // CHECK11: omp.loop.exit: 10811 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 10812 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 10813 // CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 10814 // CHECK11-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10815 // CHECK11: .omp.final.then: 10816 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 10817 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 10818 // CHECK11: .omp.final.done: 10819 // CHECK11-NEXT: ret void 10820 // 10821 // 10822 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 10823 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 10824 // CHECK11-NEXT: entry: 10825 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 10826 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 10827 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 10828 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 10829 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10830 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 10831 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 10832 // CHECK11-NEXT: 
[[DOTOMP_UB:%.*]] = alloca i32, align 4 10833 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 10834 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10835 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 10836 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 10837 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 10838 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 10839 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 10840 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 10841 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 10842 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 10843 // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 10844 // CHECK11-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 10845 // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 10846 // CHECK11-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 10847 // CHECK11-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 10848 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10849 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10850 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10851 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 10852 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10853 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10854 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 10855 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10856 // CHECK11: cond.true: 10857 // CHECK11-NEXT: br label [[COND_END:%.*]] 10858 // 
CHECK11: cond.false: 10859 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10860 // CHECK11-NEXT: br label [[COND_END]] 10861 // CHECK11: cond.end: 10862 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 10863 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 10864 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 10865 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 10866 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10867 // CHECK11: omp.inner.for.cond: 10868 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 10869 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 10870 // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 10871 // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10872 // CHECK11: omp.inner.for.body: 10873 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 10874 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 10875 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 10876 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4 10877 // CHECK11-NEXT: call void @_Z3fn5v() 10878 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 10879 // CHECK11: omp.body.continue: 10880 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10881 // CHECK11: omp.inner.for.inc: 10882 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 10883 // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 10884 // CHECK11-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4 10885 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] 10886 // CHECK11: omp.inner.for.end: 10887 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 10888 // CHECK11: omp.loop.exit: 10889 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 10890 // CHECK11-NEXT: [[TMP11:%.*]] 
= load i32, i32* [[DOTOMP_IS_LAST]], align 4 10891 // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 10892 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 10893 // CHECK11: .omp.final.then: 10894 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 10895 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 10896 // CHECK11: .omp.final.done: 10897 // CHECK11-NEXT: ret void 10898 // 10899 // 10900 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99 10901 // CHECK11-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 10902 // CHECK11-NEXT: entry: 10903 // CHECK11-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 10904 // CHECK11-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 10905 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 10906 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]]) 10907 // CHECK11-NEXT: ret void 10908 // 10909 // 10910 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..8 10911 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 10912 // CHECK11-NEXT: entry: 10913 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 10914 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 10915 // CHECK11-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 10916 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 10917 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 10918 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 10919 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 10920 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 10921 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] 
= alloca i32, align 4 10922 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 10923 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 10924 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 10925 // CHECK11-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 10926 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED12:%.*]] = alloca i64, align 8 10927 // CHECK11-NEXT: [[DOTBOUND_ZERO_ADDR18:%.*]] = alloca i32, align 4 10928 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 10929 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 10930 // CHECK11-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 10931 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 10932 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 10933 // CHECK11-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0 10934 // CHECK11-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 10935 // CHECK11-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 10936 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 10937 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 10938 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 10939 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 10940 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 10941 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 10942 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 10943 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10944 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 10945 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 10946 // CHECK11: cond.true: 10947 // 
CHECK11-NEXT: br label [[COND_END:%.*]] 10948 // CHECK11: cond.false: 10949 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 10950 // CHECK11-NEXT: br label [[COND_END]] 10951 // CHECK11: cond.end: 10952 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 10953 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 10954 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 10955 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 10956 // CHECK11-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 10957 // CHECK11-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP7]] to i1 10958 // CHECK11-NEXT: br i1 [[TOBOOL1]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE7:%.*]] 10959 // CHECK11: omp_if.then: 10960 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 10961 // CHECK11: omp.inner.for.cond: 10962 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 10963 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39 10964 // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 10965 // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 10966 // CHECK11: omp.inner.for.body: 10967 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !39 10968 // CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 10969 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39 10970 // CHECK11-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 10971 // CHECK11-NEXT: [[TMP14:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !39 10972 // CHECK11-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP14]] to i1 10973 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8* 10974 // CHECK11-NEXT: [[FROMBOOL4:%.*]] = zext i1 
[[TOBOOL3]] to i8 10975 // CHECK11-NEXT: store i8 [[FROMBOOL4]], i8* [[CONV]], align 1, !llvm.access.group !39 10976 // CHECK11-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !39 10977 // CHECK11-NEXT: [[TMP16:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !39 10978 // CHECK11-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP16]] to i1 10979 // CHECK11-NEXT: br i1 [[TOBOOL5]], label [[OMP_IF_THEN6:%.*]], label [[OMP_IF_ELSE:%.*]] 10980 // CHECK11: omp_if.then6: 10981 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP11]], i64 [[TMP13]], i64 [[TMP15]]), !llvm.access.group !39 10982 // CHECK11-NEXT: br label [[OMP_IF_END:%.*]] 10983 // CHECK11: omp_if.else: 10984 // CHECK11-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]), !llvm.access.group !39 10985 // CHECK11-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !39 10986 // CHECK11-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !39 10987 // CHECK11-NEXT: call void @.omp_outlined..9(i32* [[TMP17]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP11]], i64 [[TMP13]], i64 [[TMP15]]) #[[ATTR2]], !llvm.access.group !39 10988 // CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]), !llvm.access.group !39 10989 // CHECK11-NEXT: br label [[OMP_IF_END]] 10990 // CHECK11: omp_if.end: 10991 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 10992 // CHECK11: omp.inner.for.inc: 10993 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 10994 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !39 10995 // CHECK11-NEXT: [[ADD:%.*]] = add 
nsw i32 [[TMP18]], [[TMP19]] 10996 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 10997 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] 10998 // CHECK11: omp.inner.for.end: 10999 // CHECK11-NEXT: br label [[OMP_IF_END23:%.*]] 11000 // CHECK11: omp_if.else7: 11001 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 11002 // CHECK11: omp.inner.for.cond8: 11003 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11004 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 11005 // CHECK11-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] 11006 // CHECK11-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END22:%.*]] 11007 // CHECK11: omp.inner.for.body10: 11008 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 11009 // CHECK11-NEXT: [[TMP23:%.*]] = zext i32 [[TMP22]] to i64 11010 // CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 11011 // CHECK11-NEXT: [[TMP25:%.*]] = zext i32 [[TMP24]] to i64 11012 // CHECK11-NEXT: [[TMP26:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 11013 // CHECK11-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP26]] to i1 11014 // CHECK11-NEXT: [[CONV13:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED12]] to i8* 11015 // CHECK11-NEXT: [[FROMBOOL14:%.*]] = zext i1 [[TOBOOL11]] to i8 11016 // CHECK11-NEXT: store i8 [[FROMBOOL14]], i8* [[CONV13]], align 1 11017 // CHECK11-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED12]], align 8 11018 // CHECK11-NEXT: [[TMP28:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 11019 // CHECK11-NEXT: [[TOBOOL15:%.*]] = trunc i8 [[TMP28]] to i1 11020 // CHECK11-NEXT: br i1 [[TOBOOL15]], label [[OMP_IF_THEN16:%.*]], label [[OMP_IF_ELSE17:%.*]] 11021 // CHECK11: omp_if.then16: 11022 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP23]], i64 [[TMP25]], i64 [[TMP27]]) 11023 // CHECK11-NEXT: br label [[OMP_IF_END19:%.*]] 11024 // CHECK11: omp_if.else17: 11025 // CHECK11-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 11026 // CHECK11-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11027 // CHECK11-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR18]], align 4 11028 // CHECK11-NEXT: call void @.omp_outlined..10(i32* [[TMP29]], i32* [[DOTBOUND_ZERO_ADDR18]], i64 [[TMP23]], i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR2]] 11029 // CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 11030 // CHECK11-NEXT: br label [[OMP_IF_END19]] 11031 // CHECK11: omp_if.end19: 11032 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC20:%.*]] 11033 // CHECK11: omp.inner.for.inc20: 11034 // CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11035 // CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 11036 // CHECK11-NEXT: [[ADD21:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 11037 // CHECK11-NEXT: store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4 11038 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP42:![0-9]+]] 11039 // CHECK11: omp.inner.for.end22: 11040 // CHECK11-NEXT: br label [[OMP_IF_END23]] 11041 // CHECK11: omp_if.end23: 11042 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11043 // CHECK11: omp.loop.exit: 11044 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 11045 // CHECK11-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 11046 // CHECK11-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 11047 // CHECK11-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11048 // CHECK11: .omp.final.then: 11049 
// CHECK11-NEXT: store i32 100, i32* [[I]], align 4 11050 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 11051 // CHECK11: .omp.final.done: 11052 // CHECK11-NEXT: ret void 11053 // 11054 // 11055 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..9 11056 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 11057 // CHECK11-NEXT: entry: 11058 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 11059 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 11060 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 11061 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 11062 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 11063 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 11064 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 11065 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 11066 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 11067 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 11068 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11069 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 11070 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 11071 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 11072 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11073 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11074 // CHECK11-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 11075 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8* 11076 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 11077 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 
4 11078 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11079 // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP0]] to i32 11080 // CHECK11-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11081 // CHECK11-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32 11082 // CHECK11-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4 11083 // CHECK11-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4 11084 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 11085 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11086 // CHECK11-NEXT: [[TMP2:%.*]] = load i8, i8* [[CONV]], align 1 11087 // CHECK11-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 11088 // CHECK11-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 11089 // CHECK11: omp_if.then: 11090 // CHECK11-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11091 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 11092 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11093 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11094 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 11095 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11096 // CHECK11: cond.true: 11097 // CHECK11-NEXT: br label [[COND_END:%.*]] 11098 // CHECK11: cond.false: 11099 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11100 // CHECK11-NEXT: br label [[COND_END]] 11101 // CHECK11: cond.end: 11102 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] 11103 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 11104 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 11105 // CHECK11-NEXT: store i32 [[TMP7]], i32* 
[[DOTOMP_IV]], align 4 11106 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11107 // CHECK11: omp.inner.for.cond: 11108 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 11109 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !43 11110 // CHECK11-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 11111 // CHECK11-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 11112 // CHECK11: omp.inner.for.body: 11113 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 11114 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 11115 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 11116 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !43 11117 // CHECK11-NEXT: call void @_Z3fn6v(), !llvm.access.group !43 11118 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 11119 // CHECK11: omp.body.continue: 11120 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11121 // CHECK11: omp.inner.for.inc: 11122 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 11123 // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 11124 // CHECK11-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 11125 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] 11126 // CHECK11: omp.inner.for.end: 11127 // CHECK11-NEXT: br label [[OMP_IF_END:%.*]] 11128 // CHECK11: omp_if.else: 11129 // CHECK11-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11130 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4 11131 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP13]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11132 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* 
[[DOTOMP_UB]], align 4 11133 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP14]], 99 11134 // CHECK11-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]] 11135 // CHECK11: cond.true6: 11136 // CHECK11-NEXT: br label [[COND_END8:%.*]] 11137 // CHECK11: cond.false7: 11138 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11139 // CHECK11-NEXT: br label [[COND_END8]] 11140 // CHECK11: cond.end8: 11141 // CHECK11-NEXT: [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP15]], [[COND_FALSE7]] ] 11142 // CHECK11-NEXT: store i32 [[COND9]], i32* [[DOTOMP_UB]], align 4 11143 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 11144 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4 11145 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] 11146 // CHECK11: omp.inner.for.cond10: 11147 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11148 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11149 // CHECK11-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 11150 // CHECK11-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END18:%.*]] 11151 // CHECK11: omp.inner.for.body12: 11152 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11153 // CHECK11-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1 11154 // CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 11155 // CHECK11-NEXT: store i32 [[ADD14]], i32* [[I]], align 4 11156 // CHECK11-NEXT: call void @_Z3fn6v() 11157 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]] 11158 // CHECK11: omp.body.continue15: 11159 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] 11160 // CHECK11: omp.inner.for.inc16: 11161 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11162 // CHECK11-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP20]], 1 11163 // CHECK11-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4 11164 // CHECK11-NEXT: br label 
[[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP46:![0-9]+]] 11165 // CHECK11: omp.inner.for.end18: 11166 // CHECK11-NEXT: br label [[OMP_IF_END]] 11167 // CHECK11: omp_if.end: 11168 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11169 // CHECK11: omp.loop.exit: 11170 // CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11171 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4 11172 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]]) 11173 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 11174 // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 11175 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11176 // CHECK11: .omp.final.then: 11177 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 11178 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 11179 // CHECK11: .omp.final.done: 11180 // CHECK11-NEXT: ret void 11181 // 11182 // 11183 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 11184 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 11185 // CHECK11-NEXT: entry: 11186 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 11187 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 11188 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 11189 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 11190 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 11191 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 11192 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 11193 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 11194 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 11195 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = 
alloca i32, align 4 11196 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11197 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 11198 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 11199 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 11200 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11201 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11202 // CHECK11-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 11203 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8* 11204 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 11205 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 11206 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11207 // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP0]] to i32 11208 // CHECK11-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11209 // CHECK11-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32 11210 // CHECK11-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4 11211 // CHECK11-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4 11212 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 11213 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11214 // CHECK11-NEXT: [[TMP2:%.*]] = load i8, i8* [[CONV]], align 1 11215 // CHECK11-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 11216 // CHECK11-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 11217 // CHECK11: omp_if.then: 11218 // CHECK11-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11219 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 11220 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], 
i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11221 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11222 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 11223 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11224 // CHECK11: cond.true: 11225 // CHECK11-NEXT: br label [[COND_END:%.*]] 11226 // CHECK11: cond.false: 11227 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11228 // CHECK11-NEXT: br label [[COND_END]] 11229 // CHECK11: cond.end: 11230 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] 11231 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 11232 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 11233 // CHECK11-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4 11234 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11235 // CHECK11: omp.inner.for.cond: 11236 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 11237 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47 11238 // CHECK11-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 11239 // CHECK11-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 11240 // CHECK11: omp.inner.for.body: 11241 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 11242 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 11243 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 11244 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !47 11245 // CHECK11-NEXT: call void @_Z3fn6v(), !llvm.access.group !47 11246 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 11247 // CHECK11: omp.body.continue: 11248 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11249 // CHECK11: omp.inner.for.inc: 11250 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, 
!llvm.access.group !47 11251 // CHECK11-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 11252 // CHECK11-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 11253 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]] 11254 // CHECK11: omp.inner.for.end: 11255 // CHECK11-NEXT: br label [[OMP_IF_END:%.*]] 11256 // CHECK11: omp_if.else: 11257 // CHECK11-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11258 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4 11259 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP13]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11260 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11261 // CHECK11-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP14]], 99 11262 // CHECK11-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]] 11263 // CHECK11: cond.true6: 11264 // CHECK11-NEXT: br label [[COND_END8:%.*]] 11265 // CHECK11: cond.false7: 11266 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11267 // CHECK11-NEXT: br label [[COND_END8]] 11268 // CHECK11: cond.end8: 11269 // CHECK11-NEXT: [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP15]], [[COND_FALSE7]] ] 11270 // CHECK11-NEXT: store i32 [[COND9]], i32* [[DOTOMP_UB]], align 4 11271 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 11272 // CHECK11-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4 11273 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] 11274 // CHECK11: omp.inner.for.cond10: 11275 // CHECK11-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11276 // CHECK11-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11277 // CHECK11-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 11278 // CHECK11-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label 
[[OMP_INNER_FOR_END18:%.*]] 11279 // CHECK11: omp.inner.for.body12: 11280 // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11281 // CHECK11-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1 11282 // CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 11283 // CHECK11-NEXT: store i32 [[ADD14]], i32* [[I]], align 4 11284 // CHECK11-NEXT: call void @_Z3fn6v() 11285 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]] 11286 // CHECK11: omp.body.continue15: 11287 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] 11288 // CHECK11: omp.inner.for.inc16: 11289 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11290 // CHECK11-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP20]], 1 11291 // CHECK11-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4 11292 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP50:![0-9]+]] 11293 // CHECK11: omp.inner.for.end18: 11294 // CHECK11-NEXT: br label [[OMP_IF_END]] 11295 // CHECK11: omp_if.end: 11296 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11297 // CHECK11: omp.loop.exit: 11298 // CHECK11-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11299 // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4 11300 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]]) 11301 // CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 11302 // CHECK11-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 11303 // CHECK11-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11304 // CHECK11: .omp.final.then: 11305 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 11306 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 11307 // CHECK11: .omp.final.done: 11308 // CHECK11-NEXT: ret void 11309 // 11310 // 11311 // CHECK11-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 11312 // CHECK11-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 11313 // CHECK11-NEXT: entry: 11314 // 
CHECK11-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 11315 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 11316 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 11317 // CHECK11-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 11318 // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 11319 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 11320 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 11321 // CHECK11-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 11322 // CHECK11-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 11323 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 11324 // CHECK11-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 11325 // CHECK11-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 11326 // CHECK11-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 11327 // CHECK11: omp_offload.failed: 11328 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59() #[[ATTR2]] 11329 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] 11330 // CHECK11: omp_offload.cont: 11331 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 11332 // CHECK11-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 11333 // CHECK11-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 11334 // CHECK11-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 11335 // CHECK11: omp_offload.failed2: 11336 // 
CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65() #[[ATTR2]] 11337 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT3]] 11338 // CHECK11: omp_offload.cont3: 11339 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 11340 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 11341 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 11342 // CHECK11-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 11343 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 11344 // CHECK11-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 11345 // CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 11346 // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 11347 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 11348 // CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 11349 // CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 11350 // CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 8 11351 // CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 11352 // CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 11353 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 11354 // CHECK11-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 11355 // CHECK11-NEXT: 
[[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 11356 // CHECK11-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 11357 // CHECK11: omp_offload.failed5: 11358 // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71(i64 [[TMP5]]) #[[ATTR2]] 11359 // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT6]] 11360 // CHECK11: omp_offload.cont6: 11361 // CHECK11-NEXT: ret i32 0 11362 // 11363 // 11364 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59 11365 // CHECK11-SAME: () #[[ATTR1]] { 11366 // CHECK11-NEXT: entry: 11367 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*)) 11368 // CHECK11-NEXT: ret void 11369 // 11370 // 11371 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11 11372 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 11373 // CHECK11-NEXT: entry: 11374 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 11375 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 11376 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 11377 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 11378 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 11379 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 11380 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 11381 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11382 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 11383 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 11384 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 11385 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 11386 // CHECK11-NEXT: store 
i32 99, i32* [[DOTOMP_COMB_UB]], align 4 11387 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 11388 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11389 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11390 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 11391 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11392 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 11393 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 11394 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11395 // CHECK11: cond.true: 11396 // CHECK11-NEXT: br label [[COND_END:%.*]] 11397 // CHECK11: cond.false: 11398 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 11399 // CHECK11-NEXT: br label [[COND_END]] 11400 // CHECK11: cond.end: 11401 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 11402 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 11403 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 11404 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 11405 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11406 // CHECK11: omp.inner.for.cond: 11407 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 11408 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51 11409 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 11410 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 11411 // CHECK11: omp.inner.for.body: 11412 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !51 
11413 // CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 11414 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51 11415 // CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 11416 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !51 11417 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11418 // CHECK11: omp.inner.for.inc: 11419 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 11420 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !51 11421 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 11422 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 11423 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]] 11424 // CHECK11: omp.inner.for.end: 11425 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11426 // CHECK11: omp.loop.exit: 11427 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 11428 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 11429 // CHECK11-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 11430 // CHECK11-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11431 // CHECK11: .omp.final.then: 11432 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 11433 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 11434 // CHECK11: .omp.final.done: 11435 // CHECK11-NEXT: ret void 11436 // 11437 // 11438 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..12 11439 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef 
[[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 11440 // CHECK11-NEXT: entry: 11441 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 11442 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 11443 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 11444 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 11445 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 11446 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 11447 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 11448 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 11449 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 11450 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11451 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 11452 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 11453 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 11454 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11455 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11456 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 11457 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 11458 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11459 // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 11460 // CHECK11-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11461 // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 11462 // CHECK11-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 11463 // CHECK11-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 11464 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 11465 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11466 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], 
align 8 11467 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 11468 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11469 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11470 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 11471 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11472 // CHECK11: cond.true: 11473 // CHECK11-NEXT: br label [[COND_END:%.*]] 11474 // CHECK11: cond.false: 11475 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11476 // CHECK11-NEXT: br label [[COND_END]] 11477 // CHECK11: cond.end: 11478 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 11479 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 11480 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 11481 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 11482 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11483 // CHECK11: omp.inner.for.cond: 11484 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 11485 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !54 11486 // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 11487 // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 11488 // CHECK11: omp.inner.for.body: 11489 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 11490 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 11491 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 11492 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !54 11493 // CHECK11-NEXT: call void @_Z3fn1v(), !llvm.access.group !54 11494 // CHECK11-NEXT: 
br label [[OMP_BODY_CONTINUE:%.*]] 11495 // CHECK11: omp.body.continue: 11496 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11497 // CHECK11: omp.inner.for.inc: 11498 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 11499 // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 11500 // CHECK11-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 11501 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]] 11502 // CHECK11: omp.inner.for.end: 11503 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11504 // CHECK11: omp.loop.exit: 11505 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 11506 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 11507 // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 11508 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11509 // CHECK11: .omp.final.then: 11510 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 11511 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 11512 // CHECK11: .omp.final.done: 11513 // CHECK11-NEXT: ret void 11514 // 11515 // 11516 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65 11517 // CHECK11-SAME: () #[[ATTR1]] { 11518 // CHECK11-NEXT: entry: 11519 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..13 to void (i32*, i32*, ...)*)) 11520 // CHECK11-NEXT: ret void 11521 // 11522 // 11523 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..13 11524 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 11525 // CHECK11-NEXT: entry: 11526 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 11527 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 11528 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 11529 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 11530 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 11531 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 11532 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 11533 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11534 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 11535 // CHECK11-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 11536 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 11537 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 11538 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 11539 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 11540 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 11541 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11542 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11543 // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 11544 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11545 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], 
align 4 11546 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 11547 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11548 // CHECK11: cond.true: 11549 // CHECK11-NEXT: br label [[COND_END:%.*]] 11550 // CHECK11: cond.false: 11551 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 11552 // CHECK11-NEXT: br label [[COND_END]] 11553 // CHECK11: cond.end: 11554 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 11555 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 11556 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 11557 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 11558 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11559 // CHECK11: omp.inner.for.cond: 11560 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11561 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 11562 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 11563 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 11564 // CHECK11: omp.inner.for.body: 11565 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 11566 // CHECK11-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 11567 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 11568 // CHECK11-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 11569 // CHECK11-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 11570 // CHECK11-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11571 // CHECK11-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 11572 // CHECK11-NEXT: call void @.omp_outlined..14(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]] 11573 // CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], 
i32 [[TMP1]]) 11574 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11575 // CHECK11: omp.inner.for.inc: 11576 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11577 // CHECK11-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 11578 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 11579 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 11580 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]] 11581 // CHECK11: omp.inner.for.end: 11582 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11583 // CHECK11: omp.loop.exit: 11584 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 11585 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 11586 // CHECK11-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 11587 // CHECK11-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11588 // CHECK11: .omp.final.then: 11589 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 11590 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 11591 // CHECK11: .omp.final.done: 11592 // CHECK11-NEXT: ret void 11593 // 11594 // 11595 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..14 11596 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 11597 // CHECK11-NEXT: entry: 11598 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 11599 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 11600 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 11601 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 11602 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 11603 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 11604 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 11605 // CHECK11-NEXT: 
[[DOTOMP_UB:%.*]] = alloca i32, align 4 11606 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 11607 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11608 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 11609 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 11610 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 11611 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11612 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11613 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 11614 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 11615 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11616 // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 11617 // CHECK11-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11618 // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 11619 // CHECK11-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 11620 // CHECK11-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 11621 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 11622 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11623 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11624 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 11625 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11626 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11627 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 11628 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11629 // CHECK11: cond.true: 11630 // CHECK11-NEXT: br label [[COND_END:%.*]] 11631 // 
CHECK11: cond.false: 11632 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11633 // CHECK11-NEXT: br label [[COND_END]] 11634 // CHECK11: cond.end: 11635 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 11636 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 11637 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 11638 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 11639 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11640 // CHECK11: omp.inner.for.cond: 11641 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11642 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11643 // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 11644 // CHECK11-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 11645 // CHECK11: omp.inner.for.body: 11646 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11647 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 11648 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 11649 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4 11650 // CHECK11-NEXT: call void @_Z3fn2v() 11651 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 11652 // CHECK11: omp.body.continue: 11653 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11654 // CHECK11: omp.inner.for.inc: 11655 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 11656 // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 11657 // CHECK11-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4 11658 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]] 11659 // CHECK11: omp.inner.for.end: 11660 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11661 // CHECK11: omp.loop.exit: 11662 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 11663 // CHECK11-NEXT: [[TMP11:%.*]] 
= load i32, i32* [[DOTOMP_IS_LAST]], align 4 11664 // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 11665 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11666 // CHECK11: .omp.final.then: 11667 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 11668 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 11669 // CHECK11: .omp.final.done: 11670 // CHECK11-NEXT: ret void 11671 // 11672 // 11673 // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71 11674 // CHECK11-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 11675 // CHECK11-NEXT: entry: 11676 // CHECK11-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 11677 // CHECK11-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 11678 // CHECK11-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 11679 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32* [[CONV]]) 11680 // CHECK11-NEXT: ret void 11681 // 11682 // 11683 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15 11684 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 11685 // CHECK11-NEXT: entry: 11686 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 11687 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 11688 // CHECK11-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 11689 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 11690 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 11691 // CHECK11-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 11692 // CHECK11-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 11693 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 11694 // CHECK11-NEXT: 
[[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11695 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 11696 // CHECK11-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 11697 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 11698 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 11699 // CHECK11-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 11700 // CHECK11-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 11701 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 11702 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 11703 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 11704 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11705 // CHECK11-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11706 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 11707 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11708 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 11709 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 11710 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11711 // CHECK11: cond.true: 11712 // CHECK11-NEXT: br label [[COND_END:%.*]] 11713 // CHECK11: cond.false: 11714 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 11715 // CHECK11-NEXT: br label [[COND_END]] 11716 // CHECK11: cond.end: 11717 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 11718 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 11719 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 11720 // CHECK11-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 11721 // 
CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11722 // CHECK11: omp.inner.for.cond: 11723 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 11724 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59 11725 // CHECK11-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 11726 // CHECK11-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 11727 // CHECK11: omp.inner.for.body: 11728 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !59 11729 // CHECK11-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 11730 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59 11731 // CHECK11-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 11732 // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !59 11733 // CHECK11-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 11734 // CHECK11-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 11735 // CHECK11: omp_if.then: 11736 // CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !59 11737 // CHECK11-NEXT: br label [[OMP_IF_END:%.*]] 11738 // CHECK11: omp_if.else: 11739 // CHECK11-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !59 11740 // CHECK11-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !59 11741 // CHECK11-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !59 11742 // CHECK11-NEXT: call void @.omp_outlined..16(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !59 11743 // CHECK11-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !59 11744 // CHECK11-NEXT: br label [[OMP_IF_END]] 11745 // CHECK11: omp_if.end: 11746 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11747 // CHECK11: omp.inner.for.inc: 11748 // CHECK11-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 11749 // CHECK11-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !59 11750 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 11751 // CHECK11-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 11752 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]] 11753 // CHECK11: omp.inner.for.end: 11754 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11755 // CHECK11: omp.loop.exit: 11756 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 11757 // CHECK11-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 11758 // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 11759 // CHECK11-NEXT: br i1 [[TMP17]], label 
[[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11760 // CHECK11: .omp.final.then: 11761 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 11762 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 11763 // CHECK11: .omp.final.done: 11764 // CHECK11-NEXT: ret void 11765 // 11766 // 11767 // CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..16 11768 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 11769 // CHECK11-NEXT: entry: 11770 // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 11771 // CHECK11-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 11772 // CHECK11-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 11773 // CHECK11-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 11774 // CHECK11-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 11775 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 11776 // CHECK11-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 11777 // CHECK11-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 11778 // CHECK11-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 11779 // CHECK11-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11780 // CHECK11-NEXT: [[I:%.*]] = alloca i32, align 4 11781 // CHECK11-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 11782 // CHECK11-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 11783 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11784 // CHECK11-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11785 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 11786 // CHECK11-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 11787 // CHECK11-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11788 // CHECK11-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 11789 // CHECK11-NEXT: [[TMP1:%.*]] = 
load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11790 // CHECK11-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 11791 // CHECK11-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 11792 // CHECK11-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 11793 // CHECK11-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 11794 // CHECK11-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11795 // CHECK11-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11796 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 11797 // CHECK11-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11798 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11799 // CHECK11-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 11800 // CHECK11-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11801 // CHECK11: cond.true: 11802 // CHECK11-NEXT: br label [[COND_END:%.*]] 11803 // CHECK11: cond.false: 11804 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11805 // CHECK11-NEXT: br label [[COND_END]] 11806 // CHECK11: cond.end: 11807 // CHECK11-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 11808 // CHECK11-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 11809 // CHECK11-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 11810 // CHECK11-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 11811 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11812 // CHECK11: omp.inner.for.cond: 11813 // CHECK11-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62 11814 // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !62 11815 // CHECK11-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 11816 // CHECK11-NEXT: br i1 [[CMP2]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 11817 // CHECK11: omp.inner.for.body: 11818 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62 11819 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 11820 // CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 11821 // CHECK11-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !62 11822 // CHECK11-NEXT: call void @_Z3fn3v(), !llvm.access.group !62 11823 // CHECK11-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 11824 // CHECK11: omp.body.continue: 11825 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11826 // CHECK11: omp.inner.for.inc: 11827 // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62 11828 // CHECK11-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 11829 // CHECK11-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62 11830 // CHECK11-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]] 11831 // CHECK11: omp.inner.for.end: 11832 // CHECK11-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11833 // CHECK11: omp.loop.exit: 11834 // CHECK11-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 11835 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 11836 // CHECK11-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 11837 // CHECK11-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11838 // CHECK11: .omp.final.then: 11839 // CHECK11-NEXT: store i32 100, i32* [[I]], align 4 11840 // CHECK11-NEXT: br label [[DOTOMP_FINAL_DONE]] 11841 // CHECK11: .omp.final.done: 11842 // CHECK11-NEXT: ret void 11843 // 11844 // 11845 // CHECK11-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 11846 // CHECK11-SAME: () #[[ATTR5:[0-9]+]] { 11847 // CHECK11-NEXT: entry: 11848 // CHECK11-NEXT: call void @__tgt_register_requires(i64 1) 11849 // CHECK11-NEXT: ret void 11850 // 11851 // 11852 // CHECK12-LABEL: 
define {{[^@]+}}@_Z9gtid_testv 11853 // CHECK12-SAME: () #[[ATTR0:[0-9]+]] { 11854 // CHECK12-NEXT: entry: 11855 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 11856 // CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 11857 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 100) 11858 // CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 11859 // CHECK12-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 11860 // CHECK12-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 11861 // CHECK12: omp_offload.failed: 11862 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43() #[[ATTR2:[0-9]+]] 11863 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] 11864 // CHECK12: omp_offload.cont: 11865 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 11866 // CHECK12-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 11867 // CHECK12-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 11868 // CHECK12-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 11869 // CHECK12: omp_offload.failed2: 11870 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48() #[[ATTR2]] 11871 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT3]] 11872 // CHECK12: omp_offload.cont3: 11873 // CHECK12-NEXT: ret void 11874 // 11875 // 11876 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l43 11877 // CHECK12-SAME: () 
#[[ATTR1:[0-9]+]] { 11878 // CHECK12-NEXT: entry: 11879 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 11880 // CHECK12-NEXT: ret void 11881 // 11882 // 11883 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined. 11884 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 11885 // CHECK12-NEXT: entry: 11886 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 11887 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 11888 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 11889 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 11890 // CHECK12-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 11891 // CHECK12-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 11892 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 11893 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11894 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 11895 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 11896 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 11897 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 11898 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 11899 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 11900 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11901 // CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11902 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 11903 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11904 
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 11905 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 11906 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11907 // CHECK12: cond.true: 11908 // CHECK12-NEXT: br label [[COND_END:%.*]] 11909 // CHECK12: cond.false: 11910 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 11911 // CHECK12-NEXT: br label [[COND_END]] 11912 // CHECK12: cond.end: 11913 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 11914 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 11915 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 11916 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 11917 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11918 // CHECK12: omp.inner.for.cond: 11919 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 11920 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 11921 // CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 11922 // CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 11923 // CHECK12: omp.inner.for.body: 11924 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !15 11925 // CHECK12-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 11926 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !15 11927 // CHECK12-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 11928 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !15 11929 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 11930 // CHECK12: omp.inner.for.inc: 11931 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 11932 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !15 11933 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 11934 // CHECK12-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !15 11935 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]] 11936 // CHECK12: omp.inner.for.end: 11937 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 11938 // CHECK12: omp.loop.exit: 11939 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 11940 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 11941 // CHECK12-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 11942 // CHECK12-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 11943 // CHECK12: .omp.final.then: 11944 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 11945 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 11946 // CHECK12: .omp.final.done: 11947 // CHECK12-NEXT: ret void 11948 // 11949 // 11950 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..1 11951 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 11952 // CHECK12-NEXT: entry: 11953 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 11954 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 11955 // CHECK12-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 11956 // 
CHECK12-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 11957 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 11958 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 11959 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 11960 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 11961 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 11962 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 11963 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 11964 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 11965 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 11966 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11967 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11968 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 11969 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 11970 // CHECK12-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 11971 // CHECK12-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 11972 // CHECK12-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 11973 // CHECK12-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 11974 // CHECK12-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 11975 // CHECK12-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 11976 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 11977 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 11978 // CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 11979 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 11980 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 11981 // CHECK12-NEXT: [[TMP4:%.*]] = 
load i32, i32* [[DOTOMP_UB]], align 4 11982 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 11983 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 11984 // CHECK12: cond.true: 11985 // CHECK12-NEXT: br label [[COND_END:%.*]] 11986 // CHECK12: cond.false: 11987 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 11988 // CHECK12-NEXT: br label [[COND_END]] 11989 // CHECK12: cond.end: 11990 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 11991 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 11992 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 11993 // CHECK12-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 11994 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 11995 // CHECK12: omp.inner.for.cond: 11996 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 11997 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !19 11998 // CHECK12-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 11999 // CHECK12-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12000 // CHECK12: omp.inner.for.body: 12001 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 12002 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 12003 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 12004 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !19 12005 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 12006 // CHECK12: omp.body.continue: 12007 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12008 // CHECK12: omp.inner.for.inc: 12009 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !19 12010 // CHECK12-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 12011 // CHECK12-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], 
align 4, !llvm.access.group !19 12012 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP20:![0-9]+]] 12013 // CHECK12: omp.inner.for.end: 12014 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12015 // CHECK12: omp.loop.exit: 12016 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 12017 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 12018 // CHECK12-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 12019 // CHECK12-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 12020 // CHECK12: .omp.final.then: 12021 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 12022 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 12023 // CHECK12: .omp.final.done: 12024 // CHECK12-NEXT: ret void 12025 // 12026 // 12027 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9gtid_testv_l48 12028 // CHECK12-SAME: () #[[ATTR1]] { 12029 // CHECK12-NEXT: entry: 12030 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*)) 12031 // CHECK12-NEXT: ret void 12032 // 12033 // 12034 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..2 12035 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 12036 // CHECK12-NEXT: entry: 12037 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12038 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12039 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12040 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 12041 // CHECK12-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 12042 // CHECK12-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 12043 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 12044 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12045 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 12046 // CHECK12-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 12047 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12048 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12049 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 12050 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 12051 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12052 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12053 // CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12054 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 12055 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12056 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], 
align 4 12057 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 12058 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12059 // CHECK12: cond.true: 12060 // CHECK12-NEXT: br label [[COND_END:%.*]] 12061 // CHECK12: cond.false: 12062 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 12063 // CHECK12-NEXT: br label [[COND_END]] 12064 // CHECK12: cond.end: 12065 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 12066 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 12067 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 12068 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 12069 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12070 // CHECK12: omp.inner.for.cond: 12071 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 12072 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24 12073 // CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 12074 // CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12075 // CHECK12: omp.inner.for.body: 12076 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !24 12077 // CHECK12-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 12078 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !24 12079 // CHECK12-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 12080 // CHECK12-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !24 12081 // CHECK12-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !24 12082 // CHECK12-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !24 12083 // CHECK12-NEXT: call void @.omp_outlined..3(i32* 
[[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]], !llvm.access.group !24 12084 // CHECK12-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]), !llvm.access.group !24 12085 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12086 // CHECK12: omp.inner.for.inc: 12087 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 12088 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !24 12089 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 12090 // CHECK12-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 12091 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP25:![0-9]+]] 12092 // CHECK12: omp.inner.for.end: 12093 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12094 // CHECK12: omp.loop.exit: 12095 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 12096 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 12097 // CHECK12-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 12098 // CHECK12-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 12099 // CHECK12: .omp.final.then: 12100 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 12101 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 12102 // CHECK12: .omp.final.done: 12103 // CHECK12-NEXT: ret void 12104 // 12105 // 12106 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3 12107 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 12108 // CHECK12-NEXT: entry: 12109 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12110 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12111 // CHECK12-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, 
align 8 12112 // CHECK12-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 12113 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12114 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 12115 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 12116 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 12117 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 12118 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12119 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 12120 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12121 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12122 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 12123 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 12124 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 12125 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 12126 // CHECK12-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 12127 // CHECK12-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 12128 // CHECK12-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 12129 // CHECK12-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 12130 // CHECK12-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 12131 // CHECK12-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 12132 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12133 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12134 // CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12135 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 12136 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12137 // CHECK12-NEXT: 
[[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12138 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 12139 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12140 // CHECK12: cond.true: 12141 // CHECK12-NEXT: br label [[COND_END:%.*]] 12142 // CHECK12: cond.false: 12143 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12144 // CHECK12-NEXT: br label [[COND_END]] 12145 // CHECK12: cond.end: 12146 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 12147 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 12148 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12149 // CHECK12-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 12150 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12151 // CHECK12: omp.inner.for.cond: 12152 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 12153 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !27 12154 // CHECK12-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 12155 // CHECK12-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12156 // CHECK12: omp.inner.for.body: 12157 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 12158 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 12159 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 12160 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !27 12161 // CHECK12-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !27 12162 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 12163 // CHECK12: omp.body.continue: 12164 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12165 // CHECK12: omp.inner.for.inc: 12166 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 12167 // CHECK12-NEXT: [[ADD3:%.*]] = 
add nsw i32 [[TMP10]], 1 12168 // CHECK12-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !27 12169 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]] 12170 // CHECK12: omp.inner.for.end: 12171 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12172 // CHECK12: omp.loop.exit: 12173 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 12174 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 12175 // CHECK12-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 12176 // CHECK12-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 12177 // CHECK12: .omp.final.then: 12178 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 12179 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 12180 // CHECK12: .omp.final.done: 12181 // CHECK12-NEXT: ret void 12182 // 12183 // 12184 // CHECK12-LABEL: define {{[^@]+}}@main 12185 // CHECK12-SAME: () #[[ATTR3:[0-9]+]] { 12186 // CHECK12-NEXT: entry: 12187 // CHECK12-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 12188 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 12189 // CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 12190 // CHECK12-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 12191 // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 12192 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 12193 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 12194 // CHECK12-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 12195 // CHECK12-NEXT: store i32 0, i32* [[RETVAL]], align 4 12196 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 12197 // CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 
0, i32 1) 12198 // CHECK12-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 12199 // CHECK12-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 12200 // CHECK12: omp_offload.failed: 12201 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81() #[[ATTR2]] 12202 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] 12203 // CHECK12: omp_offload.cont: 12204 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 12205 // CHECK12-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 12206 // CHECK12-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 12207 // CHECK12-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 12208 // CHECK12: omp_offload.failed2: 12209 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90() #[[ATTR2]] 12210 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT3]] 12211 // CHECK12: omp_offload.cont3: 12212 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* @Arg, align 4 12213 // CHECK12-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 12214 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 12215 // CHECK12-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 12216 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 12217 // CHECK12-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 12218 // CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 12219 // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 12220 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 12221 // CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 12222 // CHECK12-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 12223 // CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 8 12224 // CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 12225 // CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 12226 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 12227 // CHECK12-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 12228 // CHECK12-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 12229 // CHECK12-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 12230 // CHECK12: omp_offload.failed5: 12231 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99(i64 [[TMP5]]) #[[ATTR2]] 12232 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT6]] 12233 // CHECK12: omp_offload.cont6: 12234 // CHECK12-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 12235 // CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 12236 // CHECK12-NEXT: ret i32 [[CALL]] 12237 // 12238 // 12239 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 12240 // CHECK12-SAME: () #[[ATTR1]] { 12241 // CHECK12-NEXT: entry: 12242 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*)) 12243 // CHECK12-NEXT: ret void 12244 // 12245 // 12246 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..4 12247 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 12248 // CHECK12-NEXT: entry: 12249 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12250 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12251 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12252 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 12253 // CHECK12-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 12254 // CHECK12-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 12255 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 12256 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12257 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 12258 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12259 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12260 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 12261 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 12262 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12263 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12264 // CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12265 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 12266 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12267 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 12268 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 
12269 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12270 // CHECK12: cond.true: 12271 // CHECK12-NEXT: br label [[COND_END:%.*]] 12272 // CHECK12: cond.false: 12273 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 12274 // CHECK12-NEXT: br label [[COND_END]] 12275 // CHECK12: cond.end: 12276 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 12277 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 12278 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 12279 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 12280 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12281 // CHECK12: omp.inner.for.cond: 12282 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 12283 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 12284 // CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 12285 // CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12286 // CHECK12: omp.inner.for.body: 12287 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !30 12288 // CHECK12-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 12289 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 12290 // CHECK12-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 12291 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !30 12292 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12293 // CHECK12: omp.inner.for.inc: 12294 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 12295 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !30 12296 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 12297 // CHECK12-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 12298 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP31:![0-9]+]] 12299 // CHECK12: omp.inner.for.end: 12300 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12301 // CHECK12: omp.loop.exit: 12302 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 12303 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 12304 // CHECK12-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 12305 // CHECK12-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 12306 // CHECK12: .omp.final.then: 12307 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 12308 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 12309 // CHECK12: .omp.final.done: 12310 // CHECK12-NEXT: ret void 12311 // 12312 // 12313 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5 12314 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 12315 // CHECK12-NEXT: entry: 12316 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12317 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12318 // CHECK12-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 12319 // 
CHECK12-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 12320 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12321 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 12322 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 12323 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 12324 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 12325 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12326 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 12327 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12328 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12329 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 12330 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 12331 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 12332 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 12333 // CHECK12-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 12334 // CHECK12-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 12335 // CHECK12-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 12336 // CHECK12-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 12337 // CHECK12-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 12338 // CHECK12-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 12339 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12340 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12341 // CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12342 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 12343 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12344 // CHECK12-NEXT: [[TMP4:%.*]] = load 
i32, i32* [[DOTOMP_UB]], align 4 12345 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 12346 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12347 // CHECK12: cond.true: 12348 // CHECK12-NEXT: br label [[COND_END:%.*]] 12349 // CHECK12: cond.false: 12350 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12351 // CHECK12-NEXT: br label [[COND_END]] 12352 // CHECK12: cond.end: 12353 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 12354 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 12355 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12356 // CHECK12-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 12357 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12358 // CHECK12: omp.inner.for.cond: 12359 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 12360 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !33 12361 // CHECK12-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 12362 // CHECK12-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12363 // CHECK12: omp.inner.for.body: 12364 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 12365 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 12366 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 12367 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !33 12368 // CHECK12-NEXT: call void @_Z3fn4v(), !llvm.access.group !33 12369 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 12370 // CHECK12: omp.body.continue: 12371 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12372 // CHECK12: omp.inner.for.inc: 12373 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 12374 // CHECK12-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 
12375 // CHECK12-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !33 12376 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP34:![0-9]+]] 12377 // CHECK12: omp.inner.for.end: 12378 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12379 // CHECK12: omp.loop.exit: 12380 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 12381 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 12382 // CHECK12-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 12383 // CHECK12-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 12384 // CHECK12: .omp.final.then: 12385 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 12386 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 12387 // CHECK12: .omp.final.done: 12388 // CHECK12-NEXT: ret void 12389 // 12390 // 12391 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l90 12392 // CHECK12-SAME: () #[[ATTR1]] { 12393 // CHECK12-NEXT: entry: 12394 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..6 to void (i32*, i32*, ...)*)) 12395 // CHECK12-NEXT: ret void 12396 // 12397 // 12398 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6 12399 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 12400 // CHECK12-NEXT: entry: 12401 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12402 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12403 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12404 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 12405 // CHECK12-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 12406 // CHECK12-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 12407 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 12408 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12409 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 12410 // CHECK12-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 12411 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12412 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12413 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 12414 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 12415 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12416 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12417 // CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12418 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 12419 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12420 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], 
align 4 12421 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 12422 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12423 // CHECK12: cond.true: 12424 // CHECK12-NEXT: br label [[COND_END:%.*]] 12425 // CHECK12: cond.false: 12426 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 12427 // CHECK12-NEXT: br label [[COND_END]] 12428 // CHECK12: cond.end: 12429 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 12430 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 12431 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 12432 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 12433 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12434 // CHECK12: omp.inner.for.cond: 12435 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12436 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 12437 // CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 12438 // CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12439 // CHECK12: omp.inner.for.body: 12440 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 12441 // CHECK12-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 12442 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 12443 // CHECK12-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 12444 // CHECK12-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 12445 // CHECK12-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12446 // CHECK12-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 12447 // CHECK12-NEXT: call void @.omp_outlined..7(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]] 12448 // CHECK12-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], 
i32 [[TMP1]]) 12449 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12450 // CHECK12: omp.inner.for.inc: 12451 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12452 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 12453 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 12454 // CHECK12-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 12455 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP36:![0-9]+]] 12456 // CHECK12: omp.inner.for.end: 12457 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12458 // CHECK12: omp.loop.exit: 12459 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 12460 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 12461 // CHECK12-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 12462 // CHECK12-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 12463 // CHECK12: .omp.final.then: 12464 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 12465 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 12466 // CHECK12: .omp.final.done: 12467 // CHECK12-NEXT: ret void 12468 // 12469 // 12470 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 12471 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 12472 // CHECK12-NEXT: entry: 12473 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12474 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12475 // CHECK12-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 12476 // CHECK12-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 12477 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12478 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 12479 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 12480 // CHECK12-NEXT: 
[[DOTOMP_UB:%.*]] = alloca i32, align 4 12481 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 12482 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12483 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 12484 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12485 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12486 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 12487 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 12488 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 12489 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 12490 // CHECK12-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 12491 // CHECK12-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 12492 // CHECK12-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 12493 // CHECK12-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 12494 // CHECK12-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 12495 // CHECK12-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 12496 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12497 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12498 // CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12499 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 12500 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12501 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12502 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 12503 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12504 // CHECK12: cond.true: 12505 // CHECK12-NEXT: br label [[COND_END:%.*]] 12506 // 
CHECK12: cond.false: 12507 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12508 // CHECK12-NEXT: br label [[COND_END]] 12509 // CHECK12: cond.end: 12510 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 12511 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 12512 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12513 // CHECK12-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 12514 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12515 // CHECK12: omp.inner.for.cond: 12516 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12517 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12518 // CHECK12-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 12519 // CHECK12-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12520 // CHECK12: omp.inner.for.body: 12521 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12522 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 12523 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 12524 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4 12525 // CHECK12-NEXT: call void @_Z3fn5v() 12526 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 12527 // CHECK12: omp.body.continue: 12528 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12529 // CHECK12: omp.inner.for.inc: 12530 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12531 // CHECK12-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 12532 // CHECK12-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4 12533 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP38:![0-9]+]] 12534 // CHECK12: omp.inner.for.end: 12535 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12536 // CHECK12: omp.loop.exit: 12537 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 12538 // CHECK12-NEXT: [[TMP11:%.*]] 
= load i32, i32* [[DOTOMP_IS_LAST]], align 4 12539 // CHECK12-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 12540 // CHECK12-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 12541 // CHECK12: .omp.final.then: 12542 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 12543 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 12544 // CHECK12: .omp.final.done: 12545 // CHECK12-NEXT: ret void 12546 // 12547 // 12548 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l99 12549 // CHECK12-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 12550 // CHECK12-NEXT: entry: 12551 // CHECK12-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 12552 // CHECK12-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 12553 // CHECK12-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 12554 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]]) 12555 // CHECK12-NEXT: ret void 12556 // 12557 // 12558 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..8 12559 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 12560 // CHECK12-NEXT: entry: 12561 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12562 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12563 // CHECK12-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 12564 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 12565 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12566 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 12567 // CHECK12-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 12568 // CHECK12-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 12569 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] 
= alloca i32, align 4 12570 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12571 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 12572 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 12573 // CHECK12-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 12574 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__CASTED12:%.*]] = alloca i64, align 8 12575 // CHECK12-NEXT: [[DOTBOUND_ZERO_ADDR18:%.*]] = alloca i32, align 4 12576 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12577 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12578 // CHECK12-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 12579 // CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 12580 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 12581 // CHECK12-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0 12582 // CHECK12-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 12583 // CHECK12-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 12584 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 12585 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 12586 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12587 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12588 // CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12589 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 12590 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12591 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 12592 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 12593 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12594 // CHECK12: cond.true: 12595 // 
CHECK12-NEXT: br label [[COND_END:%.*]] 12596 // CHECK12: cond.false: 12597 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 12598 // CHECK12-NEXT: br label [[COND_END]] 12599 // CHECK12: cond.end: 12600 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 12601 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 12602 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 12603 // CHECK12-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 12604 // CHECK12-NEXT: [[TMP7:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 12605 // CHECK12-NEXT: [[TOBOOL1:%.*]] = trunc i8 [[TMP7]] to i1 12606 // CHECK12-NEXT: br i1 [[TOBOOL1]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE7:%.*]] 12607 // CHECK12: omp_if.then: 12608 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12609 // CHECK12: omp.inner.for.cond: 12610 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 12611 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39 12612 // CHECK12-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 12613 // CHECK12-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12614 // CHECK12: omp.inner.for.body: 12615 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !39 12616 // CHECK12-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 12617 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !39 12618 // CHECK12-NEXT: [[TMP13:%.*]] = zext i32 [[TMP12]] to i64 12619 // CHECK12-NEXT: [[TMP14:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !39 12620 // CHECK12-NEXT: [[TOBOOL3:%.*]] = trunc i8 [[TMP14]] to i1 12621 // CHECK12-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i8* 12622 // CHECK12-NEXT: [[FROMBOOL4:%.*]] = zext i1 
[[TOBOOL3]] to i8 12623 // CHECK12-NEXT: store i8 [[FROMBOOL4]], i8* [[CONV]], align 1, !llvm.access.group !39 12624 // CHECK12-NEXT: [[TMP15:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !39 12625 // CHECK12-NEXT: [[TMP16:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1, !llvm.access.group !39 12626 // CHECK12-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP16]] to i1 12627 // CHECK12-NEXT: br i1 [[TOBOOL5]], label [[OMP_IF_THEN6:%.*]], label [[OMP_IF_ELSE:%.*]] 12628 // CHECK12: omp_if.then6: 12629 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP11]], i64 [[TMP13]], i64 [[TMP15]]), !llvm.access.group !39 12630 // CHECK12-NEXT: br label [[OMP_IF_END:%.*]] 12631 // CHECK12: omp_if.else: 12632 // CHECK12-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]), !llvm.access.group !39 12633 // CHECK12-NEXT: [[TMP17:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !39 12634 // CHECK12-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !39 12635 // CHECK12-NEXT: call void @.omp_outlined..9(i32* [[TMP17]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP11]], i64 [[TMP13]], i64 [[TMP15]]) #[[ATTR2]], !llvm.access.group !39 12636 // CHECK12-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]), !llvm.access.group !39 12637 // CHECK12-NEXT: br label [[OMP_IF_END]] 12638 // CHECK12: omp_if.end: 12639 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12640 // CHECK12: omp.inner.for.inc: 12641 // CHECK12-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 12642 // CHECK12-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !39 12643 // CHECK12-NEXT: [[ADD:%.*]] = add 
nsw i32 [[TMP18]], [[TMP19]] 12644 // CHECK12-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !39 12645 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP40:![0-9]+]] 12646 // CHECK12: omp.inner.for.end: 12647 // CHECK12-NEXT: br label [[OMP_IF_END23:%.*]] 12648 // CHECK12: omp_if.else7: 12649 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND8:%.*]] 12650 // CHECK12: omp.inner.for.cond8: 12651 // CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12652 // CHECK12-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 12653 // CHECK12-NEXT: [[CMP9:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] 12654 // CHECK12-NEXT: br i1 [[CMP9]], label [[OMP_INNER_FOR_BODY10:%.*]], label [[OMP_INNER_FOR_END22:%.*]] 12655 // CHECK12: omp.inner.for.body10: 12656 // CHECK12-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 12657 // CHECK12-NEXT: [[TMP23:%.*]] = zext i32 [[TMP22]] to i64 12658 // CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 12659 // CHECK12-NEXT: [[TMP25:%.*]] = zext i32 [[TMP24]] to i64 12660 // CHECK12-NEXT: [[TMP26:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 12661 // CHECK12-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP26]] to i1 12662 // CHECK12-NEXT: [[CONV13:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED12]] to i8* 12663 // CHECK12-NEXT: [[FROMBOOL14:%.*]] = zext i1 [[TOBOOL11]] to i8 12664 // CHECK12-NEXT: store i8 [[FROMBOOL14]], i8* [[CONV13]], align 1 12665 // CHECK12-NEXT: [[TMP27:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED12]], align 8 12666 // CHECK12-NEXT: [[TMP28:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 12667 // CHECK12-NEXT: [[TOBOOL15:%.*]] = trunc i8 [[TMP28]] to i1 12668 // CHECK12-NEXT: br i1 [[TOBOOL15]], label [[OMP_IF_THEN16:%.*]], label [[OMP_IF_ELSE17:%.*]] 12669 // CHECK12: omp_if.then16: 12670 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP23]], i64 [[TMP25]], i64 [[TMP27]]) 12671 // CHECK12-NEXT: br label [[OMP_IF_END19:%.*]] 12672 // CHECK12: omp_if.else17: 12673 // CHECK12-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 12674 // CHECK12-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12675 // CHECK12-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR18]], align 4 12676 // CHECK12-NEXT: call void @.omp_outlined..10(i32* [[TMP29]], i32* [[DOTBOUND_ZERO_ADDR18]], i64 [[TMP23]], i64 [[TMP25]], i64 [[TMP27]]) #[[ATTR2]] 12677 // CHECK12-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP3]]) 12678 // CHECK12-NEXT: br label [[OMP_IF_END19]] 12679 // CHECK12: omp_if.end19: 12680 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC20:%.*]] 12681 // CHECK12: omp.inner.for.inc20: 12682 // CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12683 // CHECK12-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 12684 // CHECK12-NEXT: [[ADD21:%.*]] = add nsw i32 [[TMP30]], [[TMP31]] 12685 // CHECK12-NEXT: store i32 [[ADD21]], i32* [[DOTOMP_IV]], align 4 12686 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND8]], !llvm.loop [[LOOP42:![0-9]+]] 12687 // CHECK12: omp.inner.for.end22: 12688 // CHECK12-NEXT: br label [[OMP_IF_END23]] 12689 // CHECK12: omp_if.end23: 12690 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12691 // CHECK12: omp.loop.exit: 12692 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 12693 // CHECK12-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 12694 // CHECK12-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 12695 // CHECK12-NEXT: br i1 [[TMP33]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 12696 // CHECK12: .omp.final.then: 12697 
// CHECK12-NEXT: store i32 100, i32* [[I]], align 4 12698 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 12699 // CHECK12: .omp.final.done: 12700 // CHECK12-NEXT: ret void 12701 // 12702 // 12703 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..9 12704 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 12705 // CHECK12-NEXT: entry: 12706 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12707 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12708 // CHECK12-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 12709 // CHECK12-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 12710 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 12711 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12712 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 12713 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 12714 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 12715 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 12716 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12717 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 12718 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12719 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12720 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 12721 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 12722 // CHECK12-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 12723 // CHECK12-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8* 12724 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 12725 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 
4 12726 // CHECK12-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 12727 // CHECK12-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP0]] to i32 12728 // CHECK12-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 12729 // CHECK12-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32 12730 // CHECK12-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4 12731 // CHECK12-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4 12732 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12733 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12734 // CHECK12-NEXT: [[TMP2:%.*]] = load i8, i8* [[CONV]], align 1 12735 // CHECK12-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 12736 // CHECK12-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 12737 // CHECK12: omp_if.then: 12738 // CHECK12-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12739 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 12740 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12741 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12742 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 12743 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12744 // CHECK12: cond.true: 12745 // CHECK12-NEXT: br label [[COND_END:%.*]] 12746 // CHECK12: cond.false: 12747 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12748 // CHECK12-NEXT: br label [[COND_END]] 12749 // CHECK12: cond.end: 12750 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] 12751 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 12752 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12753 // CHECK12-NEXT: store i32 [[TMP7]], i32* 
[[DOTOMP_IV]], align 4 12754 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12755 // CHECK12: omp.inner.for.cond: 12756 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 12757 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !43 12758 // CHECK12-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 12759 // CHECK12-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12760 // CHECK12: omp.inner.for.body: 12761 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 12762 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 12763 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 12764 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !43 12765 // CHECK12-NEXT: call void @_Z3fn6v(), !llvm.access.group !43 12766 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 12767 // CHECK12: omp.body.continue: 12768 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12769 // CHECK12: omp.inner.for.inc: 12770 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 12771 // CHECK12-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 12772 // CHECK12-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 12773 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP44:![0-9]+]] 12774 // CHECK12: omp.inner.for.end: 12775 // CHECK12-NEXT: br label [[OMP_IF_END:%.*]] 12776 // CHECK12: omp_if.else: 12777 // CHECK12-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12778 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4 12779 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP13]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12780 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* 
[[DOTOMP_UB]], align 4 12781 // CHECK12-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP14]], 99 12782 // CHECK12-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]] 12783 // CHECK12: cond.true6: 12784 // CHECK12-NEXT: br label [[COND_END8:%.*]] 12785 // CHECK12: cond.false7: 12786 // CHECK12-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12787 // CHECK12-NEXT: br label [[COND_END8]] 12788 // CHECK12: cond.end8: 12789 // CHECK12-NEXT: [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP15]], [[COND_FALSE7]] ] 12790 // CHECK12-NEXT: store i32 [[COND9]], i32* [[DOTOMP_UB]], align 4 12791 // CHECK12-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12792 // CHECK12-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4 12793 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] 12794 // CHECK12: omp.inner.for.cond10: 12795 // CHECK12-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12796 // CHECK12-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12797 // CHECK12-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 12798 // CHECK12-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label [[OMP_INNER_FOR_END18:%.*]] 12799 // CHECK12: omp.inner.for.body12: 12800 // CHECK12-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12801 // CHECK12-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1 12802 // CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 12803 // CHECK12-NEXT: store i32 [[ADD14]], i32* [[I]], align 4 12804 // CHECK12-NEXT: call void @_Z3fn6v() 12805 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]] 12806 // CHECK12: omp.body.continue15: 12807 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] 12808 // CHECK12: omp.inner.for.inc16: 12809 // CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12810 // CHECK12-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP20]], 1 12811 // CHECK12-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4 12812 // CHECK12-NEXT: br label 
[[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP46:![0-9]+]] 12813 // CHECK12: omp.inner.for.end18: 12814 // CHECK12-NEXT: br label [[OMP_IF_END]] 12815 // CHECK12: omp_if.end: 12816 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12817 // CHECK12: omp.loop.exit: 12818 // CHECK12-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12819 // CHECK12-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4 12820 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]]) 12821 // CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 12822 // CHECK12-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 12823 // CHECK12-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 12824 // CHECK12: .omp.final.then: 12825 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 12826 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 12827 // CHECK12: .omp.final.done: 12828 // CHECK12-NEXT: ret void 12829 // 12830 // 12831 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 12832 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] { 12833 // CHECK12-NEXT: entry: 12834 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 12835 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 12836 // CHECK12-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 12837 // CHECK12-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 12838 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8 12839 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 12840 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 12841 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 12842 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 12843 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = 
alloca i32, align 4 12844 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 12845 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 12846 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 12847 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 12848 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 12849 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 12850 // CHECK12-NEXT: store i64 [[DOTCAPTURE_EXPR_]], i64* [[DOTCAPTURE_EXPR__ADDR]], align 8 12851 // CHECK12-NEXT: [[CONV:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__ADDR]] to i8* 12852 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 12853 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 12854 // CHECK12-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 12855 // CHECK12-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP0]] to i32 12856 // CHECK12-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 12857 // CHECK12-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP1]] to i32 12858 // CHECK12-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_LB]], align 4 12859 // CHECK12-NEXT: store i32 [[CONV2]], i32* [[DOTOMP_UB]], align 4 12860 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 12861 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 12862 // CHECK12-NEXT: [[TMP2:%.*]] = load i8, i8* [[CONV]], align 1 12863 // CHECK12-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP2]] to i1 12864 // CHECK12-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 12865 // CHECK12: omp_if.then: 12866 // CHECK12-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12867 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 12868 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], 
i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12869 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12870 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP5]], 99 12871 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 12872 // CHECK12: cond.true: 12873 // CHECK12-NEXT: br label [[COND_END:%.*]] 12874 // CHECK12: cond.false: 12875 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12876 // CHECK12-NEXT: br label [[COND_END]] 12877 // CHECK12: cond.end: 12878 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP6]], [[COND_FALSE]] ] 12879 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 12880 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12881 // CHECK12-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_IV]], align 4 12882 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 12883 // CHECK12: omp.inner.for.cond: 12884 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 12885 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !47 12886 // CHECK12-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]] 12887 // CHECK12-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 12888 // CHECK12: omp.inner.for.body: 12889 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 12890 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1 12891 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 12892 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !47 12893 // CHECK12-NEXT: call void @_Z3fn6v(), !llvm.access.group !47 12894 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 12895 // CHECK12: omp.body.continue: 12896 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 12897 // CHECK12: omp.inner.for.inc: 12898 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, 
!llvm.access.group !47 12899 // CHECK12-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1 12900 // CHECK12-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 12901 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP48:![0-9]+]] 12902 // CHECK12: omp.inner.for.end: 12903 // CHECK12-NEXT: br label [[OMP_IF_END:%.*]] 12904 // CHECK12: omp_if.else: 12905 // CHECK12-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12906 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4 12907 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP13]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 12908 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12909 // CHECK12-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP14]], 99 12910 // CHECK12-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]] 12911 // CHECK12: cond.true6: 12912 // CHECK12-NEXT: br label [[COND_END8:%.*]] 12913 // CHECK12: cond.false7: 12914 // CHECK12-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12915 // CHECK12-NEXT: br label [[COND_END8]] 12916 // CHECK12: cond.end8: 12917 // CHECK12-NEXT: [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP15]], [[COND_FALSE7]] ] 12918 // CHECK12-NEXT: store i32 [[COND9]], i32* [[DOTOMP_UB]], align 4 12919 // CHECK12-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 12920 // CHECK12-NEXT: store i32 [[TMP16]], i32* [[DOTOMP_IV]], align 4 12921 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND10:%.*]] 12922 // CHECK12: omp.inner.for.cond10: 12923 // CHECK12-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12924 // CHECK12-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 12925 // CHECK12-NEXT: [[CMP11:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 12926 // CHECK12-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY12:%.*]], label 
[[OMP_INNER_FOR_END18:%.*]] 12927 // CHECK12: omp.inner.for.body12: 12928 // CHECK12-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12929 // CHECK12-NEXT: [[MUL13:%.*]] = mul nsw i32 [[TMP19]], 1 12930 // CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 0, [[MUL13]] 12931 // CHECK12-NEXT: store i32 [[ADD14]], i32* [[I]], align 4 12932 // CHECK12-NEXT: call void @_Z3fn6v() 12933 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE15:%.*]] 12934 // CHECK12: omp.body.continue15: 12935 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC16:%.*]] 12936 // CHECK12: omp.inner.for.inc16: 12937 // CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 12938 // CHECK12-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP20]], 1 12939 // CHECK12-NEXT: store i32 [[ADD17]], i32* [[DOTOMP_IV]], align 4 12940 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND10]], !llvm.loop [[LOOP50:![0-9]+]] 12941 // CHECK12: omp.inner.for.end18: 12942 // CHECK12-NEXT: br label [[OMP_IF_END]] 12943 // CHECK12: omp_if.end: 12944 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 12945 // CHECK12: omp.loop.exit: 12946 // CHECK12-NEXT: [[TMP21:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 12947 // CHECK12-NEXT: [[TMP22:%.*]] = load i32, i32* [[TMP21]], align 4 12948 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP22]]) 12949 // CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 12950 // CHECK12-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 12951 // CHECK12-NEXT: br i1 [[TMP24]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 12952 // CHECK12: .omp.final.then: 12953 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 12954 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 12955 // CHECK12: .omp.final.done: 12956 // CHECK12-NEXT: ret void 12957 // 12958 // 12959 // CHECK12-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 12960 // CHECK12-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 12961 // CHECK12-NEXT: entry: 12962 // 
CHECK12-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 12963 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 12964 // CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 12965 // CHECK12-NEXT: [[ARG_CASTED:%.*]] = alloca i64, align 8 12966 // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 12967 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 12968 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 12969 // CHECK12-NEXT: [[_TMP4:%.*]] = alloca i32, align 4 12970 // CHECK12-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 12971 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 12972 // CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 12973 // CHECK12-NEXT: [[TMP1:%.*]] = icmp ne i32 [[TMP0]], 0 12974 // CHECK12-NEXT: br i1 [[TMP1]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 12975 // CHECK12: omp_offload.failed: 12976 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59() #[[ATTR2]] 12977 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] 12978 // CHECK12: omp_offload.cont: 12979 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 12980 // CHECK12-NEXT: [[TMP2:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65.region_id, i32 0, i8** null, i8** null, i64* null, i64* null, i8** null, i8** null, i32 0, i32 1) 12981 // CHECK12-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 12982 // CHECK12-NEXT: br i1 [[TMP3]], label [[OMP_OFFLOAD_FAILED2:%.*]], label [[OMP_OFFLOAD_CONT3:%.*]] 12983 // CHECK12: omp_offload.failed2: 12984 // 
CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65() #[[ATTR2]] 12985 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT3]] 12986 // CHECK12: omp_offload.cont3: 12987 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 12988 // CHECK12-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_CASTED]] to i32* 12989 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[CONV]], align 4 12990 // CHECK12-NEXT: [[TMP5:%.*]] = load i64, i64* [[ARG_CASTED]], align 8 12991 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 12992 // CHECK12-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i64* 12993 // CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP7]], align 8 12994 // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 12995 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* 12996 // CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP9]], align 8 12997 // CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 12998 // CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 8 12999 // CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 13000 // CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 13001 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 100) 13002 // CHECK12-NEXT: [[TMP13:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71.region_id, i32 1, i8** [[TMP11]], i8** [[TMP12]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) 13003 // CHECK12-NEXT: 
[[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 13004 // CHECK12-NEXT: br i1 [[TMP14]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 13005 // CHECK12: omp_offload.failed5: 13006 // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71(i64 [[TMP5]]) #[[ATTR2]] 13007 // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT6]] 13008 // CHECK12: omp_offload.cont6: 13009 // CHECK12-NEXT: ret i32 0 13010 // 13011 // 13012 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l59 13013 // CHECK12-SAME: () #[[ATTR1]] { 13014 // CHECK12-NEXT: entry: 13015 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*)) 13016 // CHECK12-NEXT: ret void 13017 // 13018 // 13019 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..11 13020 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 13021 // CHECK12-NEXT: entry: 13022 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 13023 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 13024 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13025 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 13026 // CHECK12-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 13027 // CHECK12-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 13028 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 13029 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 13030 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 13031 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 13032 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 13033 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 13034 // CHECK12-NEXT: store 
i32 99, i32* [[DOTOMP_COMB_UB]], align 4 13035 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 13036 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 13037 // CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 13038 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 13039 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 13040 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 13041 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 13042 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 13043 // CHECK12: cond.true: 13044 // CHECK12-NEXT: br label [[COND_END:%.*]] 13045 // CHECK12: cond.false: 13046 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 13047 // CHECK12-NEXT: br label [[COND_END]] 13048 // CHECK12: cond.end: 13049 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 13050 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 13051 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 13052 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 13053 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13054 // CHECK12: omp.inner.for.cond: 13055 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 13056 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51 13057 // CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 13058 // CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13059 // CHECK12: omp.inner.for.body: 13060 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !51 
13061 // CHECK12-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 13062 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !51 13063 // CHECK12-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 13064 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP8]], i64 [[TMP10]]), !llvm.access.group !51 13065 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13066 // CHECK12: omp.inner.for.inc: 13067 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 13068 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !51 13069 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP12]] 13070 // CHECK12-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !51 13071 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP52:![0-9]+]] 13072 // CHECK12: omp.inner.for.end: 13073 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 13074 // CHECK12: omp.loop.exit: 13075 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 13076 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 13077 // CHECK12-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0 13078 // CHECK12-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 13079 // CHECK12: .omp.final.then: 13080 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 13081 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 13082 // CHECK12: .omp.final.done: 13083 // CHECK12-NEXT: ret void 13084 // 13085 // 13086 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..12 13087 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef 
[[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 13088 // CHECK12-NEXT: entry: 13089 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 13090 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 13091 // CHECK12-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 13092 // CHECK12-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 13093 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13094 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 13095 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 13096 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 13097 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 13098 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 13099 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 13100 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 13101 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 13102 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 13103 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 13104 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 13105 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 13106 // CHECK12-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 13107 // CHECK12-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 13108 // CHECK12-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 13109 // CHECK12-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 13110 // CHECK12-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 13111 // CHECK12-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 13112 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 13113 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 13114 // CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], 
align 8 13115 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 13116 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 13117 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 13118 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 13119 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 13120 // CHECK12: cond.true: 13121 // CHECK12-NEXT: br label [[COND_END:%.*]] 13122 // CHECK12: cond.false: 13123 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 13124 // CHECK12-NEXT: br label [[COND_END]] 13125 // CHECK12: cond.end: 13126 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 13127 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 13128 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 13129 // CHECK12-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 13130 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13131 // CHECK12: omp.inner.for.cond: 13132 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 13133 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !54 13134 // CHECK12-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 13135 // CHECK12-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13136 // CHECK12: omp.inner.for.body: 13137 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 13138 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 13139 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 13140 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !54 13141 // CHECK12-NEXT: call void @_Z3fn1v(), !llvm.access.group !54 13142 // CHECK12-NEXT: 
br label [[OMP_BODY_CONTINUE:%.*]] 13143 // CHECK12: omp.body.continue: 13144 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13145 // CHECK12: omp.inner.for.inc: 13146 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 13147 // CHECK12-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 13148 // CHECK12-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !54 13149 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP55:![0-9]+]] 13150 // CHECK12: omp.inner.for.end: 13151 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 13152 // CHECK12: omp.loop.exit: 13153 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 13154 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 13155 // CHECK12-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 13156 // CHECK12-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 13157 // CHECK12: .omp.final.then: 13158 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 13159 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 13160 // CHECK12: .omp.final.done: 13161 // CHECK12-NEXT: ret void 13162 // 13163 // 13164 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l65 13165 // CHECK12-SAME: () #[[ATTR1]] { 13166 // CHECK12-NEXT: entry: 13167 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined..13 to void (i32*, i32*, ...)*)) 13168 // CHECK12-NEXT: ret void 13169 // 13170 // 13171 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..13 13172 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 13173 // CHECK12-NEXT: entry: 13174 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 13175 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 13176 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13177 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 13178 // CHECK12-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 13179 // CHECK12-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 13180 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 13181 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 13182 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 13183 // CHECK12-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 13184 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 13185 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 13186 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 13187 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 13188 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 13189 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 13190 // CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 13191 // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 13192 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 13193 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], 
align 4 13194 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 99 13195 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 13196 // CHECK12: cond.true: 13197 // CHECK12-NEXT: br label [[COND_END:%.*]] 13198 // CHECK12: cond.false: 13199 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 13200 // CHECK12-NEXT: br label [[COND_END]] 13201 // CHECK12: cond.end: 13202 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ] 13203 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 13204 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 13205 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_IV]], align 4 13206 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13207 // CHECK12: omp.inner.for.cond: 13208 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 13209 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 13210 // CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]] 13211 // CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13212 // CHECK12: omp.inner.for.body: 13213 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 13214 // CHECK12-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64 13215 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 13216 // CHECK12-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64 13217 // CHECK12-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP1]]) 13218 // CHECK12-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 13219 // CHECK12-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 13220 // CHECK12-NEXT: call void @.omp_outlined..14(i32* [[TMP11]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP8]], i64 [[TMP10]]) #[[ATTR2]] 13221 // CHECK12-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], 
i32 [[TMP1]]) 13222 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13223 // CHECK12: omp.inner.for.inc: 13224 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 13225 // CHECK12-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 13226 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 13227 // CHECK12-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 13228 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP57:![0-9]+]] 13229 // CHECK12: omp.inner.for.end: 13230 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 13231 // CHECK12: omp.loop.exit: 13232 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]]) 13233 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 13234 // CHECK12-NEXT: [[TMP15:%.*]] = icmp ne i32 [[TMP14]], 0 13235 // CHECK12-NEXT: br i1 [[TMP15]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 13236 // CHECK12: .omp.final.then: 13237 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 13238 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 13239 // CHECK12: .omp.final.done: 13240 // CHECK12-NEXT: ret void 13241 // 13242 // 13243 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..14 13244 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 13245 // CHECK12-NEXT: entry: 13246 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 13247 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 13248 // CHECK12-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 13249 // CHECK12-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 13250 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13251 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 13252 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 13253 // CHECK12-NEXT: 
[[DOTOMP_UB:%.*]] = alloca i32, align 4 13254 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 13255 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 13256 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 13257 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 13258 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 13259 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 13260 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 13261 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 13262 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 13263 // CHECK12-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 13264 // CHECK12-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 13265 // CHECK12-NEXT: [[TMP1:%.*]] = load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 13266 // CHECK12-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 13267 // CHECK12-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 13268 // CHECK12-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 13269 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 13270 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 13271 // CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 13272 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 13273 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 13274 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 13275 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 13276 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 13277 // CHECK12: cond.true: 13278 // CHECK12-NEXT: br label [[COND_END:%.*]] 13279 // 
CHECK12: cond.false: 13280 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 13281 // CHECK12-NEXT: br label [[COND_END]] 13282 // CHECK12: cond.end: 13283 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 13284 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 13285 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 13286 // CHECK12-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 13287 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13288 // CHECK12: omp.inner.for.cond: 13289 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 13290 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 13291 // CHECK12-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 13292 // CHECK12-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13293 // CHECK12: omp.inner.for.body: 13294 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 13295 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 13296 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 13297 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4 13298 // CHECK12-NEXT: call void @_Z3fn2v() 13299 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13300 // CHECK12: omp.body.continue: 13301 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13302 // CHECK12: omp.inner.for.inc: 13303 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 13304 // CHECK12-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 13305 // CHECK12-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4 13306 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP58:![0-9]+]] 13307 // CHECK12: omp.inner.for.end: 13308 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 13309 // CHECK12: omp.loop.exit: 13310 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 13311 // CHECK12-NEXT: [[TMP11:%.*]] 
= load i32, i32* [[DOTOMP_IS_LAST]], align 4 13312 // CHECK12-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 13313 // CHECK12-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 13314 // CHECK12: .omp.final.then: 13315 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 13316 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 13317 // CHECK12: .omp.final.done: 13318 // CHECK12-NEXT: ret void 13319 // 13320 // 13321 // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiEiT__l71 13322 // CHECK12-SAME: (i64 noundef [[ARG:%.*]]) #[[ATTR1]] { 13323 // CHECK12-NEXT: entry: 13324 // CHECK12-NEXT: [[ARG_ADDR:%.*]] = alloca i64, align 8 13325 // CHECK12-NEXT: store i64 [[ARG]], i64* [[ARG_ADDR]], align 8 13326 // CHECK12-NEXT: [[CONV:%.*]] = bitcast i64* [[ARG_ADDR]] to i32* 13327 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32* [[CONV]]) 13328 // CHECK12-NEXT: ret void 13329 // 13330 // 13331 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..15 13332 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[ARG:%.*]]) #[[ATTR1]] { 13333 // CHECK12-NEXT: entry: 13334 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 13335 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 13336 // CHECK12-NEXT: [[ARG_ADDR:%.*]] = alloca i32*, align 8 13337 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13338 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 13339 // CHECK12-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 13340 // CHECK12-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 13341 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 13342 // CHECK12-NEXT: 
[[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 13343 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 13344 // CHECK12-NEXT: [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4 13345 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 13346 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 13347 // CHECK12-NEXT: store i32* [[ARG]], i32** [[ARG_ADDR]], align 8 13348 // CHECK12-NEXT: [[TMP0:%.*]] = load i32*, i32** [[ARG_ADDR]], align 8 13349 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 13350 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_COMB_UB]], align 4 13351 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 13352 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 13353 // CHECK12-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 13354 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4 13355 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 13356 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 13357 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99 13358 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 13359 // CHECK12: cond.true: 13360 // CHECK12-NEXT: br label [[COND_END:%.*]] 13361 // CHECK12: cond.false: 13362 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 13363 // CHECK12-NEXT: br label [[COND_END]] 13364 // CHECK12: cond.end: 13365 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ] 13366 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 13367 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 13368 // CHECK12-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4 13369 // 
CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13370 // CHECK12: omp.inner.for.cond: 13371 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 13372 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59 13373 // CHECK12-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 13374 // CHECK12-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13375 // CHECK12: omp.inner.for.body: 13376 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !59 13377 // CHECK12-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 13378 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !59 13379 // CHECK12-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 13380 // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP0]], align 4, !llvm.access.group !59 13381 // CHECK12-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP12]], 0 13382 // CHECK12-NEXT: br i1 [[TOBOOL]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 13383 // CHECK12: omp_if.then: 13384 // CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]]), !llvm.access.group !59 13385 // CHECK12-NEXT: br label [[OMP_IF_END:%.*]] 13386 // CHECK12: omp_if.else: 13387 // CHECK12-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !59 13388 // CHECK12-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8, !llvm.access.group !59 13389 // CHECK12-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4, !llvm.access.group !59 13390 // CHECK12-NEXT: call void @.omp_outlined..16(i32* [[TMP13]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP9]], i64 [[TMP11]]) #[[ATTR2]], !llvm.access.group !59 13391 // CHECK12-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]), !llvm.access.group !59 13392 // CHECK12-NEXT: br label [[OMP_IF_END]] 13393 // CHECK12: omp_if.end: 13394 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13395 // CHECK12: omp.inner.for.inc: 13396 // CHECK12-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 13397 // CHECK12-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4, !llvm.access.group !59 13398 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]] 13399 // CHECK12-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 13400 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP60:![0-9]+]] 13401 // CHECK12: omp.inner.for.end: 13402 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 13403 // CHECK12: omp.loop.exit: 13404 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]]) 13405 // CHECK12-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 13406 // CHECK12-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 13407 // CHECK12-NEXT: br i1 [[TMP17]], label 
[[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 13408 // CHECK12: .omp.final.then: 13409 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 13410 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 13411 // CHECK12: .omp.final.done: 13412 // CHECK12-NEXT: ret void 13413 // 13414 // 13415 // CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..16 13416 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]]) #[[ATTR1]] { 13417 // CHECK12-NEXT: entry: 13418 // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 13419 // CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 13420 // CHECK12-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8 13421 // CHECK12-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8 13422 // CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13423 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 13424 // CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 13425 // CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 13426 // CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 13427 // CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 13428 // CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4 13429 // CHECK12-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 13430 // CHECK12-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 13431 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_LB_]], i64* [[DOTPREVIOUS_LB__ADDR]], align 8 13432 // CHECK12-NEXT: store i64 [[DOTPREVIOUS_UB_]], i64* [[DOTPREVIOUS_UB__ADDR]], align 8 13433 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 13434 // CHECK12-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 13435 // CHECK12-NEXT: [[TMP0:%.*]] = load i64, i64* [[DOTPREVIOUS_LB__ADDR]], align 8 13436 // CHECK12-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32 13437 // CHECK12-NEXT: [[TMP1:%.*]] = 
load i64, i64* [[DOTPREVIOUS_UB__ADDR]], align 8 13438 // CHECK12-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32 13439 // CHECK12-NEXT: store i32 [[CONV]], i32* [[DOTOMP_LB]], align 4 13440 // CHECK12-NEXT: store i32 [[CONV1]], i32* [[DOTOMP_UB]], align 4 13441 // CHECK12-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 13442 // CHECK12-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 13443 // CHECK12-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 13444 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 13445 // CHECK12-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 13446 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 13447 // CHECK12-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 99 13448 // CHECK12-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 13449 // CHECK12: cond.true: 13450 // CHECK12-NEXT: br label [[COND_END:%.*]] 13451 // CHECK12: cond.false: 13452 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 13453 // CHECK12-NEXT: br label [[COND_END]] 13454 // CHECK12: cond.end: 13455 // CHECK12-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 13456 // CHECK12-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 13457 // CHECK12-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 13458 // CHECK12-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 13459 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13460 // CHECK12: omp.inner.for.cond: 13461 // CHECK12-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62 13462 // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !62 13463 // CHECK12-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 13464 // CHECK12-NEXT: br i1 [[CMP2]], label 
[[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13465 // CHECK12: omp.inner.for.body: 13466 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62 13467 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1 13468 // CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 13469 // CHECK12-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !62 13470 // CHECK12-NEXT: call void @_Z3fn3v(), !llvm.access.group !62 13471 // CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13472 // CHECK12: omp.body.continue: 13473 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13474 // CHECK12: omp.inner.for.inc: 13475 // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62 13476 // CHECK12-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1 13477 // CHECK12-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !62 13478 // CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP63:![0-9]+]] 13479 // CHECK12: omp.inner.for.end: 13480 // CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 13481 // CHECK12: omp.loop.exit: 13482 // CHECK12-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 13483 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4 13484 // CHECK12-NEXT: [[TMP12:%.*]] = icmp ne i32 [[TMP11]], 0 13485 // CHECK12-NEXT: br i1 [[TMP12]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]] 13486 // CHECK12: .omp.final.then: 13487 // CHECK12-NEXT: store i32 100, i32* [[I]], align 4 13488 // CHECK12-NEXT: br label [[DOTOMP_FINAL_DONE]] 13489 // CHECK12: .omp.final.done: 13490 // CHECK12-NEXT: ret void 13491 // 13492 // 13493 // CHECK12-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 13494 // CHECK12-SAME: () #[[ATTR5:[0-9]+]] { 13495 // CHECK12-NEXT: entry: 13496 // CHECK12-NEXT: call void @__tgt_register_requires(i64 1) 13497 // CHECK12-NEXT: ret void 13498 // 13499 // 13500 // CHECK13-LABEL: 
define {{[^@]+}}@_Z9gtid_testv 13501 // CHECK13-SAME: () #[[ATTR0:[0-9]+]] { 13502 // CHECK13-NEXT: entry: 13503 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4 13504 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 13505 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 13506 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13507 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4 13508 // CHECK13-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 13509 // CHECK13-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 13510 // CHECK13-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 13511 // CHECK13-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 13512 // CHECK13-NEXT: [[I6:%.*]] = alloca i32, align 4 13513 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 13514 // CHECK13-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 13515 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 13516 // CHECK13-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 13517 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13518 // CHECK13: omp.inner.for.cond: 13519 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 13520 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6 13521 // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 13522 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13523 // CHECK13: omp.inner.for.body: 13524 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 13525 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 13526 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 13527 // CHECK13-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6 13528 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13529 // CHECK13: omp.body.continue: 13530 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13531 // CHECK13: omp.inner.for.inc: 13532 
// CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 13533 // CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 13534 // CHECK13-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 13535 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] 13536 // CHECK13: omp.inner.for.end: 13537 // CHECK13-NEXT: store i32 100, i32* [[I]], align 4 13538 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 13539 // CHECK13-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 13540 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 13541 // CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 13542 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 13543 // CHECK13: omp.inner.for.cond7: 13544 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 13545 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !10 13546 // CHECK13-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 13547 // CHECK13-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 13548 // CHECK13: omp.inner.for.body9: 13549 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 13550 // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 13551 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 13552 // CHECK13-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !10 13553 // CHECK13-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !10 13554 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 13555 // CHECK13: omp.body.continue12: 13556 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 13557 // CHECK13: omp.inner.for.inc13: 13558 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 13559 // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 13560 // 
CHECK13-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 13561 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP11:![0-9]+]] 13562 // CHECK13: omp.inner.for.end15: 13563 // CHECK13-NEXT: store i32 100, i32* [[I6]], align 4 13564 // CHECK13-NEXT: ret void 13565 // 13566 // 13567 // CHECK13-LABEL: define {{[^@]+}}@main 13568 // CHECK13-SAME: () #[[ATTR1:[0-9]+]] { 13569 // CHECK13-NEXT: entry: 13570 // CHECK13-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 13571 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4 13572 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 13573 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 13574 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13575 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4 13576 // CHECK13-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 13577 // CHECK13-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 13578 // CHECK13-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 13579 // CHECK13-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 13580 // CHECK13-NEXT: [[I6:%.*]] = alloca i32, align 4 13581 // CHECK13-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 13582 // CHECK13-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 13583 // CHECK13-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 13584 // CHECK13-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 13585 // CHECK13-NEXT: [[I20:%.*]] = alloca i32, align 4 13586 // CHECK13-NEXT: store i32 0, i32* [[RETVAL]], align 4 13587 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 13588 // CHECK13-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 13589 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 13590 // CHECK13-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 13591 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13592 // CHECK13: omp.inner.for.cond: 13593 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 13594 // CHECK13-NEXT: [[TMP2:%.*]] = 
load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13 13595 // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 13596 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13597 // CHECK13: omp.inner.for.body: 13598 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 13599 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 13600 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 13601 // CHECK13-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13 13602 // CHECK13-NEXT: call void @_Z3fn4v(), !llvm.access.group !13 13603 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13604 // CHECK13: omp.body.continue: 13605 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13606 // CHECK13: omp.inner.for.inc: 13607 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 13608 // CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 13609 // CHECK13-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 13610 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] 13611 // CHECK13: omp.inner.for.end: 13612 // CHECK13-NEXT: store i32 100, i32* [[I]], align 4 13613 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 13614 // CHECK13-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 13615 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 13616 // CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 13617 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 13618 // CHECK13: omp.inner.for.cond7: 13619 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 13620 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !16 13621 // CHECK13-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 13622 // CHECK13-NEXT: br i1 [[CMP8]], label 
[[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 13623 // CHECK13: omp.inner.for.body9: 13624 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 13625 // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 13626 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 13627 // CHECK13-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !16 13628 // CHECK13-NEXT: call void @_Z3fn5v(), !llvm.access.group !16 13629 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 13630 // CHECK13: omp.body.continue12: 13631 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 13632 // CHECK13: omp.inner.for.inc13: 13633 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 13634 // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 13635 // CHECK13-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 13636 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP17:![0-9]+]] 13637 // CHECK13: omp.inner.for.end15: 13638 // CHECK13-NEXT: store i32 100, i32* [[I6]], align 4 13639 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 13640 // CHECK13-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 13641 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 13642 // CHECK13-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 13643 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 13644 // CHECK13: omp.inner.for.cond21: 13645 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !19 13646 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !19 13647 // CHECK13-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 13648 // CHECK13-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 13649 // CHECK13: omp.inner.for.body23: 13650 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, 
i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !19 13651 // CHECK13-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 13652 // CHECK13-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 13653 // CHECK13-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !19 13654 // CHECK13-NEXT: call void @_Z3fn6v(), !llvm.access.group !19 13655 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 13656 // CHECK13: omp.body.continue26: 13657 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 13658 // CHECK13: omp.inner.for.inc27: 13659 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !19 13660 // CHECK13-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 13661 // CHECK13-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !19 13662 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP20:![0-9]+]] 13663 // CHECK13: omp.inner.for.end29: 13664 // CHECK13-NEXT: store i32 100, i32* [[I20]], align 4 13665 // CHECK13-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 13666 // CHECK13-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 13667 // CHECK13-NEXT: ret i32 [[CALL]] 13668 // 13669 // 13670 // CHECK13-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 13671 // CHECK13-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 13672 // CHECK13-NEXT: entry: 13673 // CHECK13-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 13674 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4 13675 // CHECK13-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 13676 // CHECK13-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 13677 // CHECK13-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13678 // CHECK13-NEXT: [[I:%.*]] = alloca i32, align 4 13679 // CHECK13-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 13680 // CHECK13-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 13681 // CHECK13-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 13682 // CHECK13-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 13683 // 
CHECK13-NEXT: [[I6:%.*]] = alloca i32, align 4 13684 // CHECK13-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 13685 // CHECK13-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 13686 // CHECK13-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 13687 // CHECK13-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 13688 // CHECK13-NEXT: [[I20:%.*]] = alloca i32, align 4 13689 // CHECK13-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 13690 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 13691 // CHECK13-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 13692 // CHECK13-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 13693 // CHECK13-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 13694 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13695 // CHECK13: omp.inner.for.cond: 13696 // CHECK13-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 13697 // CHECK13-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22 13698 // CHECK13-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 13699 // CHECK13-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13700 // CHECK13: omp.inner.for.body: 13701 // CHECK13-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 13702 // CHECK13-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 13703 // CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 13704 // CHECK13-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22 13705 // CHECK13-NEXT: call void @_Z3fn1v(), !llvm.access.group !22 13706 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13707 // CHECK13: omp.body.continue: 13708 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13709 // CHECK13: omp.inner.for.inc: 13710 // CHECK13-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 13711 // CHECK13-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 13712 // CHECK13-NEXT: store i32 [[ADD1]], i32* 
[[DOTOMP_IV]], align 4, !llvm.access.group !22 13713 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] 13714 // CHECK13: omp.inner.for.end: 13715 // CHECK13-NEXT: store i32 100, i32* [[I]], align 4 13716 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 13717 // CHECK13-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 13718 // CHECK13-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 13719 // CHECK13-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 13720 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 13721 // CHECK13: omp.inner.for.cond7: 13722 // CHECK13-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !25 13723 // CHECK13-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !25 13724 // CHECK13-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 13725 // CHECK13-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 13726 // CHECK13: omp.inner.for.body9: 13727 // CHECK13-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !25 13728 // CHECK13-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 13729 // CHECK13-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 13730 // CHECK13-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !25 13731 // CHECK13-NEXT: call void @_Z3fn2v(), !llvm.access.group !25 13732 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 13733 // CHECK13: omp.body.continue12: 13734 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 13735 // CHECK13: omp.inner.for.inc13: 13736 // CHECK13-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !25 13737 // CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 13738 // CHECK13-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !25 13739 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP26:![0-9]+]] 13740 // CHECK13: omp.inner.for.end15: 
13741 // CHECK13-NEXT: store i32 100, i32* [[I6]], align 4 13742 // CHECK13-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 13743 // CHECK13-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 13744 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 13745 // CHECK13-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 13746 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 13747 // CHECK13: omp.inner.for.cond21: 13748 // CHECK13-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !28 13749 // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !28 13750 // CHECK13-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 13751 // CHECK13-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 13752 // CHECK13: omp.inner.for.body23: 13753 // CHECK13-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !28 13754 // CHECK13-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 13755 // CHECK13-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 13756 // CHECK13-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !28 13757 // CHECK13-NEXT: call void @_Z3fn3v(), !llvm.access.group !28 13758 // CHECK13-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 13759 // CHECK13: omp.body.continue26: 13760 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 13761 // CHECK13: omp.inner.for.inc27: 13762 // CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !28 13763 // CHECK13-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 13764 // CHECK13-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !28 13765 // CHECK13-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP29:![0-9]+]] 13766 // CHECK13: omp.inner.for.end29: 13767 // CHECK13-NEXT: store i32 100, i32* [[I20]], align 4 13768 // CHECK13-NEXT: ret i32 0 13769 // 13770 // 13771 // CHECK14-LABEL: define 
{{[^@]+}}@_Z9gtid_testv 13772 // CHECK14-SAME: () #[[ATTR0:[0-9]+]] { 13773 // CHECK14-NEXT: entry: 13774 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4 13775 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 13776 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 13777 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13778 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4 13779 // CHECK14-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 13780 // CHECK14-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 13781 // CHECK14-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 13782 // CHECK14-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 13783 // CHECK14-NEXT: [[I6:%.*]] = alloca i32, align 4 13784 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 13785 // CHECK14-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 13786 // CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 13787 // CHECK14-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 13788 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13789 // CHECK14: omp.inner.for.cond: 13790 // CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 13791 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6 13792 // CHECK14-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 13793 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13794 // CHECK14: omp.inner.for.body: 13795 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 13796 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 13797 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 13798 // CHECK14-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6 13799 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13800 // CHECK14: omp.body.continue: 13801 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13802 // CHECK14: omp.inner.for.inc: 13803 // 
CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 13804 // CHECK14-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 13805 // CHECK14-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 13806 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] 13807 // CHECK14: omp.inner.for.end: 13808 // CHECK14-NEXT: store i32 100, i32* [[I]], align 4 13809 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 13810 // CHECK14-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 13811 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 13812 // CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 13813 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 13814 // CHECK14: omp.inner.for.cond7: 13815 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 13816 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !10 13817 // CHECK14-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 13818 // CHECK14-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 13819 // CHECK14: omp.inner.for.body9: 13820 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 13821 // CHECK14-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 13822 // CHECK14-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 13823 // CHECK14-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !10 13824 // CHECK14-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !10 13825 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 13826 // CHECK14: omp.body.continue12: 13827 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 13828 // CHECK14: omp.inner.for.inc13: 13829 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 13830 // CHECK14-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 13831 // CHECK14-NEXT: 
store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 13832 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP11:![0-9]+]] 13833 // CHECK14: omp.inner.for.end15: 13834 // CHECK14-NEXT: store i32 100, i32* [[I6]], align 4 13835 // CHECK14-NEXT: ret void 13836 // 13837 // 13838 // CHECK14-LABEL: define {{[^@]+}}@main 13839 // CHECK14-SAME: () #[[ATTR1:[0-9]+]] { 13840 // CHECK14-NEXT: entry: 13841 // CHECK14-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 13842 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4 13843 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 13844 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 13845 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13846 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4 13847 // CHECK14-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 13848 // CHECK14-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 13849 // CHECK14-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 13850 // CHECK14-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 13851 // CHECK14-NEXT: [[I6:%.*]] = alloca i32, align 4 13852 // CHECK14-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 13853 // CHECK14-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 13854 // CHECK14-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 13855 // CHECK14-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 13856 // CHECK14-NEXT: [[I20:%.*]] = alloca i32, align 4 13857 // CHECK14-NEXT: store i32 0, i32* [[RETVAL]], align 4 13858 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 13859 // CHECK14-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 13860 // CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 13861 // CHECK14-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 13862 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13863 // CHECK14: omp.inner.for.cond: 13864 // CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 13865 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* 
[[DOTOMP_UB]], align 4, !llvm.access.group !13 13866 // CHECK14-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 13867 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13868 // CHECK14: omp.inner.for.body: 13869 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 13870 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 13871 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 13872 // CHECK14-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13 13873 // CHECK14-NEXT: call void @_Z3fn4v(), !llvm.access.group !13 13874 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13875 // CHECK14: omp.body.continue: 13876 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13877 // CHECK14: omp.inner.for.inc: 13878 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 13879 // CHECK14-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 13880 // CHECK14-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 13881 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] 13882 // CHECK14: omp.inner.for.end: 13883 // CHECK14-NEXT: store i32 100, i32* [[I]], align 4 13884 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 13885 // CHECK14-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 13886 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 13887 // CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 13888 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 13889 // CHECK14: omp.inner.for.cond7: 13890 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 13891 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !16 13892 // CHECK14-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 13893 // CHECK14-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label 
[[OMP_INNER_FOR_END15:%.*]] 13894 // CHECK14: omp.inner.for.body9: 13895 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 13896 // CHECK14-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 13897 // CHECK14-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 13898 // CHECK14-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !16 13899 // CHECK14-NEXT: call void @_Z3fn5v(), !llvm.access.group !16 13900 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 13901 // CHECK14: omp.body.continue12: 13902 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 13903 // CHECK14: omp.inner.for.inc13: 13904 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 13905 // CHECK14-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 13906 // CHECK14-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !16 13907 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP17:![0-9]+]] 13908 // CHECK14: omp.inner.for.end15: 13909 // CHECK14-NEXT: store i32 100, i32* [[I6]], align 4 13910 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 13911 // CHECK14-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 13912 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 13913 // CHECK14-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 13914 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 13915 // CHECK14: omp.inner.for.cond21: 13916 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !19 13917 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !19 13918 // CHECK14-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 13919 // CHECK14-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 13920 // CHECK14: omp.inner.for.body23: 13921 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, 
!llvm.access.group !19 13922 // CHECK14-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 13923 // CHECK14-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 13924 // CHECK14-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !19 13925 // CHECK14-NEXT: call void @_Z3fn6v(), !llvm.access.group !19 13926 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 13927 // CHECK14: omp.body.continue26: 13928 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 13929 // CHECK14: omp.inner.for.inc27: 13930 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !19 13931 // CHECK14-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 13932 // CHECK14-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !19 13933 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP20:![0-9]+]] 13934 // CHECK14: omp.inner.for.end29: 13935 // CHECK14-NEXT: store i32 100, i32* [[I20]], align 4 13936 // CHECK14-NEXT: [[TMP15:%.*]] = load i32, i32* @Arg, align 4 13937 // CHECK14-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP15]]) 13938 // CHECK14-NEXT: ret i32 [[CALL]] 13939 // 13940 // 13941 // CHECK14-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 13942 // CHECK14-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 13943 // CHECK14-NEXT: entry: 13944 // CHECK14-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 13945 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4 13946 // CHECK14-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 13947 // CHECK14-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 13948 // CHECK14-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 13949 // CHECK14-NEXT: [[I:%.*]] = alloca i32, align 4 13950 // CHECK14-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 13951 // CHECK14-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 13952 // CHECK14-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 13953 // CHECK14-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 13954 // CHECK14-NEXT: [[I6:%.*]] = alloca i32, 
align 4 13955 // CHECK14-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 13956 // CHECK14-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 13957 // CHECK14-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 13958 // CHECK14-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 13959 // CHECK14-NEXT: [[I20:%.*]] = alloca i32, align 4 13960 // CHECK14-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 13961 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 13962 // CHECK14-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 13963 // CHECK14-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 13964 // CHECK14-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 13965 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 13966 // CHECK14: omp.inner.for.cond: 13967 // CHECK14-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 13968 // CHECK14-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22 13969 // CHECK14-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 13970 // CHECK14-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 13971 // CHECK14: omp.inner.for.body: 13972 // CHECK14-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 13973 // CHECK14-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 13974 // CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 13975 // CHECK14-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22 13976 // CHECK14-NEXT: call void @_Z3fn1v(), !llvm.access.group !22 13977 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 13978 // CHECK14: omp.body.continue: 13979 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 13980 // CHECK14: omp.inner.for.inc: 13981 // CHECK14-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 13982 // CHECK14-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 13983 // CHECK14-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 
13984 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] 13985 // CHECK14: omp.inner.for.end: 13986 // CHECK14-NEXT: store i32 100, i32* [[I]], align 4 13987 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 13988 // CHECK14-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 13989 // CHECK14-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 13990 // CHECK14-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 13991 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 13992 // CHECK14: omp.inner.for.cond7: 13993 // CHECK14-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !25 13994 // CHECK14-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !25 13995 // CHECK14-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 13996 // CHECK14-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 13997 // CHECK14: omp.inner.for.body9: 13998 // CHECK14-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !25 13999 // CHECK14-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 14000 // CHECK14-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 14001 // CHECK14-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !25 14002 // CHECK14-NEXT: call void @_Z3fn2v(), !llvm.access.group !25 14003 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 14004 // CHECK14: omp.body.continue12: 14005 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 14006 // CHECK14: omp.inner.for.inc13: 14007 // CHECK14-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !25 14008 // CHECK14-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 14009 // CHECK14-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !25 14010 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP26:![0-9]+]] 14011 // CHECK14: omp.inner.for.end15: 14012 // CHECK14-NEXT: store i32 100, i32* [[I6]], 
align 4 14013 // CHECK14-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 14014 // CHECK14-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 14015 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 14016 // CHECK14-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 14017 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 14018 // CHECK14: omp.inner.for.cond21: 14019 // CHECK14-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !28 14020 // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !28 14021 // CHECK14-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 14022 // CHECK14-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 14023 // CHECK14: omp.inner.for.body23: 14024 // CHECK14-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !28 14025 // CHECK14-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 14026 // CHECK14-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 14027 // CHECK14-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !28 14028 // CHECK14-NEXT: call void @_Z3fn3v(), !llvm.access.group !28 14029 // CHECK14-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 14030 // CHECK14: omp.body.continue26: 14031 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 14032 // CHECK14: omp.inner.for.inc27: 14033 // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !28 14034 // CHECK14-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 14035 // CHECK14-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !28 14036 // CHECK14-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP29:![0-9]+]] 14037 // CHECK14: omp.inner.for.end29: 14038 // CHECK14-NEXT: store i32 100, i32* [[I20]], align 4 14039 // CHECK14-NEXT: ret i32 0 14040 // 14041 // 14042 // CHECK15-LABEL: define {{[^@]+}}@_Z9gtid_testv 14043 // CHECK15-SAME: () 
#[[ATTR0:[0-9]+]] { 14044 // CHECK15-NEXT: entry: 14045 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 14046 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 14047 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 14048 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 14049 // CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4 14050 // CHECK15-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 14051 // CHECK15-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 14052 // CHECK15-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 14053 // CHECK15-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 14054 // CHECK15-NEXT: [[I6:%.*]] = alloca i32, align 4 14055 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 14056 // CHECK15-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 14057 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 14058 // CHECK15-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 14059 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14060 // CHECK15: omp.inner.for.cond: 14061 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 14062 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6 14063 // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 14064 // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14065 // CHECK15: omp.inner.for.body: 14066 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 14067 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 14068 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 14069 // CHECK15-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6 14070 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14071 // CHECK15: omp.body.continue: 14072 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14073 // CHECK15: omp.inner.for.inc: 14074 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* 
[[DOTOMP_IV]], align 4, !llvm.access.group !6 14075 // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 14076 // CHECK15-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 14077 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] 14078 // CHECK15: omp.inner.for.end: 14079 // CHECK15-NEXT: store i32 100, i32* [[I]], align 4 14080 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 14081 // CHECK15-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 14082 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 14083 // CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 14084 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 14085 // CHECK15: omp.inner.for.cond7: 14086 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 14087 // CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !10 14088 // CHECK15-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 14089 // CHECK15-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 14090 // CHECK15: omp.inner.for.body9: 14091 // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 14092 // CHECK15-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 14093 // CHECK15-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 14094 // CHECK15-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !10 14095 // CHECK15-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !10 14096 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 14097 // CHECK15: omp.body.continue12: 14098 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 14099 // CHECK15: omp.inner.for.inc13: 14100 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 14101 // CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 14102 // CHECK15-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], 
align 4, !llvm.access.group !10 14103 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP11:![0-9]+]] 14104 // CHECK15: omp.inner.for.end15: 14105 // CHECK15-NEXT: store i32 100, i32* [[I6]], align 4 14106 // CHECK15-NEXT: ret void 14107 // 14108 // 14109 // CHECK15-LABEL: define {{[^@]+}}@main 14110 // CHECK15-SAME: () #[[ATTR1:[0-9]+]] { 14111 // CHECK15-NEXT: entry: 14112 // CHECK15-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 14113 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 14114 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 14115 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 14116 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 14117 // CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4 14118 // CHECK15-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 14119 // CHECK15-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 14120 // CHECK15-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 14121 // CHECK15-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 14122 // CHECK15-NEXT: [[I6:%.*]] = alloca i32, align 4 14123 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 14124 // CHECK15-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 14125 // CHECK15-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 14126 // CHECK15-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 14127 // CHECK15-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 14128 // CHECK15-NEXT: [[I20:%.*]] = alloca i32, align 4 14129 // CHECK15-NEXT: store i32 0, i32* [[RETVAL]], align 4 14130 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 14131 // CHECK15-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 14132 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 14133 // CHECK15-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 14134 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14135 // CHECK15: omp.inner.for.cond: 14136 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 14137 // CHECK15-NEXT: 
[[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13 14138 // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 14139 // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14140 // CHECK15: omp.inner.for.body: 14141 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 14142 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 14143 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 14144 // CHECK15-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13 14145 // CHECK15-NEXT: call void @_Z3fn4v(), !llvm.access.group !13 14146 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14147 // CHECK15: omp.body.continue: 14148 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14149 // CHECK15: omp.inner.for.inc: 14150 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 14151 // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 14152 // CHECK15-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 14153 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] 14154 // CHECK15: omp.inner.for.end: 14155 // CHECK15-NEXT: store i32 100, i32* [[I]], align 4 14156 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 14157 // CHECK15-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 14158 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 14159 // CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 14160 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 14161 // CHECK15: omp.inner.for.cond7: 14162 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14163 // CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4 14164 // CHECK15-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 14165 // CHECK15-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label 
[[OMP_INNER_FOR_END15:%.*]] 14166 // CHECK15: omp.inner.for.body9: 14167 // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14168 // CHECK15-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 14169 // CHECK15-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 14170 // CHECK15-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4 14171 // CHECK15-NEXT: call void @_Z3fn5v() 14172 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 14173 // CHECK15: omp.body.continue12: 14174 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 14175 // CHECK15: omp.inner.for.inc13: 14176 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14177 // CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 14178 // CHECK15-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4 14179 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP16:![0-9]+]] 14180 // CHECK15: omp.inner.for.end15: 14181 // CHECK15-NEXT: store i32 100, i32* [[I6]], align 4 14182 // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* @Arg, align 4 14183 // CHECK15-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 14184 // CHECK15-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 14185 // CHECK15-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 14186 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 14187 // CHECK15-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 14188 // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 14189 // CHECK15-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV19]], align 4 14190 // CHECK15-NEXT: [[TMP12:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 14191 // CHECK15-NEXT: [[TOBOOL21:%.*]] = trunc i8 [[TMP12]] to i1 14192 // CHECK15-NEXT: br i1 [[TOBOOL21]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 14193 // CHECK15: omp_if.then: 14194 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND22:%.*]] 14195 // CHECK15: omp.inner.for.cond22: 14196 // CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, 
!llvm.access.group !18 14197 // CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !18 14198 // CHECK15-NEXT: [[CMP23:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 14199 // CHECK15-NEXT: br i1 [[CMP23]], label [[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END30:%.*]] 14200 // CHECK15: omp.inner.for.body24: 14201 // CHECK15-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !18 14202 // CHECK15-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP15]], 1 14203 // CHECK15-NEXT: [[ADD26:%.*]] = add nsw i32 0, [[MUL25]] 14204 // CHECK15-NEXT: store i32 [[ADD26]], i32* [[I20]], align 4, !llvm.access.group !18 14205 // CHECK15-NEXT: call void @_Z3fn6v(), !llvm.access.group !18 14206 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]] 14207 // CHECK15: omp.body.continue27: 14208 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]] 14209 // CHECK15: omp.inner.for.inc28: 14210 // CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !18 14211 // CHECK15-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP16]], 1 14212 // CHECK15-NEXT: store i32 [[ADD29]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !18 14213 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP19:![0-9]+]] 14214 // CHECK15: omp.inner.for.end30: 14215 // CHECK15-NEXT: br label [[OMP_IF_END:%.*]] 14216 // CHECK15: omp_if.else: 14217 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND31:%.*]] 14218 // CHECK15: omp.inner.for.cond31: 14219 // CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 14220 // CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4 14221 // CHECK15-NEXT: [[CMP32:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 14222 // CHECK15-NEXT: br i1 [[CMP32]], label [[OMP_INNER_FOR_BODY33:%.*]], label [[OMP_INNER_FOR_END39:%.*]] 14223 // CHECK15: omp.inner.for.body33: 14224 // CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 14225 // CHECK15-NEXT: 
[[MUL34:%.*]] = mul nsw i32 [[TMP19]], 1 14226 // CHECK15-NEXT: [[ADD35:%.*]] = add nsw i32 0, [[MUL34]] 14227 // CHECK15-NEXT: store i32 [[ADD35]], i32* [[I20]], align 4 14228 // CHECK15-NEXT: call void @_Z3fn6v() 14229 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE36:%.*]] 14230 // CHECK15: omp.body.continue36: 14231 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC37:%.*]] 14232 // CHECK15: omp.inner.for.inc37: 14233 // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 14234 // CHECK15-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP20]], 1 14235 // CHECK15-NEXT: store i32 [[ADD38]], i32* [[DOTOMP_IV19]], align 4 14236 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND31]], !llvm.loop [[LOOP21:![0-9]+]] 14237 // CHECK15: omp.inner.for.end39: 14238 // CHECK15-NEXT: br label [[OMP_IF_END]] 14239 // CHECK15: omp_if.end: 14240 // CHECK15-NEXT: store i32 100, i32* [[I20]], align 4 14241 // CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* @Arg, align 4 14242 // CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP21]]) 14243 // CHECK15-NEXT: ret i32 [[CALL]] 14244 // 14245 // 14246 // CHECK15-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 14247 // CHECK15-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 14248 // CHECK15-NEXT: entry: 14249 // CHECK15-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 14250 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 14251 // CHECK15-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 14252 // CHECK15-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 14253 // CHECK15-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 14254 // CHECK15-NEXT: [[I:%.*]] = alloca i32, align 4 14255 // CHECK15-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 14256 // CHECK15-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 14257 // CHECK15-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 14258 // CHECK15-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 14259 // CHECK15-NEXT: [[I6:%.*]] = alloca i32, align 4 14260 // CHECK15-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 
14261 // CHECK15-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 14262 // CHECK15-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 14263 // CHECK15-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 14264 // CHECK15-NEXT: [[I20:%.*]] = alloca i32, align 4 14265 // CHECK15-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 14266 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 14267 // CHECK15-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 14268 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 14269 // CHECK15-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 14270 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14271 // CHECK15: omp.inner.for.cond: 14272 // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 14273 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22 14274 // CHECK15-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 14275 // CHECK15-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14276 // CHECK15: omp.inner.for.body: 14277 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 14278 // CHECK15-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 14279 // CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 14280 // CHECK15-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22 14281 // CHECK15-NEXT: call void @_Z3fn1v(), !llvm.access.group !22 14282 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14283 // CHECK15: omp.body.continue: 14284 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14285 // CHECK15: omp.inner.for.inc: 14286 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 14287 // CHECK15-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 14288 // CHECK15-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 14289 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop 
[[LOOP23:![0-9]+]] 14290 // CHECK15: omp.inner.for.end: 14291 // CHECK15-NEXT: store i32 100, i32* [[I]], align 4 14292 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 14293 // CHECK15-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 14294 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 14295 // CHECK15-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 14296 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 14297 // CHECK15: omp.inner.for.cond7: 14298 // CHECK15-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14299 // CHECK15-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4 14300 // CHECK15-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 14301 // CHECK15-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 14302 // CHECK15: omp.inner.for.body9: 14303 // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14304 // CHECK15-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 14305 // CHECK15-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 14306 // CHECK15-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4 14307 // CHECK15-NEXT: call void @_Z3fn2v() 14308 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 14309 // CHECK15: omp.body.continue12: 14310 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 14311 // CHECK15: omp.inner.for.inc13: 14312 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14313 // CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 14314 // CHECK15-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4 14315 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP25:![0-9]+]] 14316 // CHECK15: omp.inner.for.end15: 14317 // CHECK15-NEXT: store i32 100, i32* [[I6]], align 4 14318 // CHECK15-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 14319 // CHECK15-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 14320 // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 14321 // 
CHECK15-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 14322 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 14323 // CHECK15: omp.inner.for.cond21: 14324 // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !26 14325 // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !26 14326 // CHECK15-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 14327 // CHECK15-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 14328 // CHECK15: omp.inner.for.body23: 14329 // CHECK15-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !26 14330 // CHECK15-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 14331 // CHECK15-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 14332 // CHECK15-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !26 14333 // CHECK15-NEXT: call void @_Z3fn3v(), !llvm.access.group !26 14334 // CHECK15-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 14335 // CHECK15: omp.body.continue26: 14336 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 14337 // CHECK15: omp.inner.for.inc27: 14338 // CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !26 14339 // CHECK15-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 14340 // CHECK15-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !26 14341 // CHECK15-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP27:![0-9]+]] 14342 // CHECK15: omp.inner.for.end29: 14343 // CHECK15-NEXT: store i32 100, i32* [[I20]], align 4 14344 // CHECK15-NEXT: ret i32 0 14345 // 14346 // 14347 // CHECK16-LABEL: define {{[^@]+}}@_Z9gtid_testv 14348 // CHECK16-SAME: () #[[ATTR0:[0-9]+]] { 14349 // CHECK16-NEXT: entry: 14350 // CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4 14351 // CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 14352 // CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 
14353 // CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 14354 // CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4 14355 // CHECK16-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 14356 // CHECK16-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 14357 // CHECK16-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 14358 // CHECK16-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 14359 // CHECK16-NEXT: [[I6:%.*]] = alloca i32, align 4 14360 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 14361 // CHECK16-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 14362 // CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 14363 // CHECK16-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 14364 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14365 // CHECK16: omp.inner.for.cond: 14366 // CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 14367 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6 14368 // CHECK16-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 14369 // CHECK16-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14370 // CHECK16: omp.inner.for.body: 14371 // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 14372 // CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 14373 // CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 14374 // CHECK16-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6 14375 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14376 // CHECK16: omp.body.continue: 14377 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14378 // CHECK16: omp.inner.for.inc: 14379 // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 14380 // CHECK16-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 14381 // CHECK16-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6 14382 // CHECK16-NEXT: br label 
[[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]] 14383 // CHECK16: omp.inner.for.end: 14384 // CHECK16-NEXT: store i32 100, i32* [[I]], align 4 14385 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 14386 // CHECK16-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 14387 // CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 14388 // CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 14389 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 14390 // CHECK16: omp.inner.for.cond7: 14391 // CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 14392 // CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4, !llvm.access.group !10 14393 // CHECK16-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 14394 // CHECK16-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 14395 // CHECK16: omp.inner.for.body9: 14396 // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 14397 // CHECK16-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 14398 // CHECK16-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 14399 // CHECK16-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4, !llvm.access.group !10 14400 // CHECK16-NEXT: call void @_Z9gtid_testv(), !llvm.access.group !10 14401 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 14402 // CHECK16: omp.body.continue12: 14403 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 14404 // CHECK16: omp.inner.for.inc13: 14405 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 14406 // CHECK16-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 14407 // CHECK16-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4, !llvm.access.group !10 14408 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP11:![0-9]+]] 14409 // CHECK16: omp.inner.for.end15: 14410 // CHECK16-NEXT: store i32 100, i32* [[I6]], align 4 14411 // 
CHECK16-NEXT: ret void 14412 // 14413 // 14414 // CHECK16-LABEL: define {{[^@]+}}@main 14415 // CHECK16-SAME: () #[[ATTR1:[0-9]+]] { 14416 // CHECK16-NEXT: entry: 14417 // CHECK16-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 14418 // CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4 14419 // CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 14420 // CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 14421 // CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 14422 // CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4 14423 // CHECK16-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 14424 // CHECK16-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 14425 // CHECK16-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 14426 // CHECK16-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 14427 // CHECK16-NEXT: [[I6:%.*]] = alloca i32, align 4 14428 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i8, align 1 14429 // CHECK16-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 14430 // CHECK16-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 14431 // CHECK16-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 14432 // CHECK16-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 14433 // CHECK16-NEXT: [[I20:%.*]] = alloca i32, align 4 14434 // CHECK16-NEXT: store i32 0, i32* [[RETVAL]], align 4 14435 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 14436 // CHECK16-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 14437 // CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 14438 // CHECK16-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 14439 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14440 // CHECK16: omp.inner.for.cond: 14441 // CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 14442 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !13 14443 // CHECK16-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 14444 // CHECK16-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label 
[[OMP_INNER_FOR_END:%.*]] 14445 // CHECK16: omp.inner.for.body: 14446 // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 14447 // CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 14448 // CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 14449 // CHECK16-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !13 14450 // CHECK16-NEXT: call void @_Z3fn4v(), !llvm.access.group !13 14451 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14452 // CHECK16: omp.body.continue: 14453 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14454 // CHECK16: omp.inner.for.inc: 14455 // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 14456 // CHECK16-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 14457 // CHECK16-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !13 14458 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP14:![0-9]+]] 14459 // CHECK16: omp.inner.for.end: 14460 // CHECK16-NEXT: store i32 100, i32* [[I]], align 4 14461 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 14462 // CHECK16-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 14463 // CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 14464 // CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 14465 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 14466 // CHECK16: omp.inner.for.cond7: 14467 // CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14468 // CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4 14469 // CHECK16-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 14470 // CHECK16-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 14471 // CHECK16: omp.inner.for.body9: 14472 // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14473 // CHECK16-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 14474 // CHECK16-NEXT: [[ADD11:%.*]] = add 
nsw i32 0, [[MUL10]] 14475 // CHECK16-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4 14476 // CHECK16-NEXT: call void @_Z3fn5v() 14477 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 14478 // CHECK16: omp.body.continue12: 14479 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 14480 // CHECK16: omp.inner.for.inc13: 14481 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14482 // CHECK16-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 14483 // CHECK16-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4 14484 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP16:![0-9]+]] 14485 // CHECK16: omp.inner.for.end15: 14486 // CHECK16-NEXT: store i32 100, i32* [[I6]], align 4 14487 // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* @Arg, align 4 14488 // CHECK16-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP10]], 0 14489 // CHECK16-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 14490 // CHECK16-NEXT: store i8 [[FROMBOOL]], i8* [[DOTCAPTURE_EXPR_]], align 1 14491 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 14492 // CHECK16-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 14493 // CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 14494 // CHECK16-NEXT: store i32 [[TMP11]], i32* [[DOTOMP_IV19]], align 4 14495 // CHECK16-NEXT: [[TMP12:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 14496 // CHECK16-NEXT: [[TOBOOL21:%.*]] = trunc i8 [[TMP12]] to i1 14497 // CHECK16-NEXT: br i1 [[TOBOOL21]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] 14498 // CHECK16: omp_if.then: 14499 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND22:%.*]] 14500 // CHECK16: omp.inner.for.cond22: 14501 // CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !18 14502 // CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !18 14503 // CHECK16-NEXT: [[CMP23:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]] 14504 // CHECK16-NEXT: br i1 [[CMP23]], label 
[[OMP_INNER_FOR_BODY24:%.*]], label [[OMP_INNER_FOR_END30:%.*]] 14505 // CHECK16: omp.inner.for.body24: 14506 // CHECK16-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !18 14507 // CHECK16-NEXT: [[MUL25:%.*]] = mul nsw i32 [[TMP15]], 1 14508 // CHECK16-NEXT: [[ADD26:%.*]] = add nsw i32 0, [[MUL25]] 14509 // CHECK16-NEXT: store i32 [[ADD26]], i32* [[I20]], align 4, !llvm.access.group !18 14510 // CHECK16-NEXT: call void @_Z3fn6v(), !llvm.access.group !18 14511 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE27:%.*]] 14512 // CHECK16: omp.body.continue27: 14513 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC28:%.*]] 14514 // CHECK16: omp.inner.for.inc28: 14515 // CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !18 14516 // CHECK16-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP16]], 1 14517 // CHECK16-NEXT: store i32 [[ADD29]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !18 14518 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND22]], !llvm.loop [[LOOP19:![0-9]+]] 14519 // CHECK16: omp.inner.for.end30: 14520 // CHECK16-NEXT: br label [[OMP_IF_END:%.*]] 14521 // CHECK16: omp_if.else: 14522 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND31:%.*]] 14523 // CHECK16: omp.inner.for.cond31: 14524 // CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 14525 // CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4 14526 // CHECK16-NEXT: [[CMP32:%.*]] = icmp sle i32 [[TMP17]], [[TMP18]] 14527 // CHECK16-NEXT: br i1 [[CMP32]], label [[OMP_INNER_FOR_BODY33:%.*]], label [[OMP_INNER_FOR_END39:%.*]] 14528 // CHECK16: omp.inner.for.body33: 14529 // CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 14530 // CHECK16-NEXT: [[MUL34:%.*]] = mul nsw i32 [[TMP19]], 1 14531 // CHECK16-NEXT: [[ADD35:%.*]] = add nsw i32 0, [[MUL34]] 14532 // CHECK16-NEXT: store i32 [[ADD35]], i32* [[I20]], align 4 14533 // CHECK16-NEXT: call void @_Z3fn6v() 14534 // CHECK16-NEXT: br label 
[[OMP_BODY_CONTINUE36:%.*]] 14535 // CHECK16: omp.body.continue36: 14536 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC37:%.*]] 14537 // CHECK16: omp.inner.for.inc37: 14538 // CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4 14539 // CHECK16-NEXT: [[ADD38:%.*]] = add nsw i32 [[TMP20]], 1 14540 // CHECK16-NEXT: store i32 [[ADD38]], i32* [[DOTOMP_IV19]], align 4 14541 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND31]], !llvm.loop [[LOOP21:![0-9]+]] 14542 // CHECK16: omp.inner.for.end39: 14543 // CHECK16-NEXT: br label [[OMP_IF_END]] 14544 // CHECK16: omp_if.end: 14545 // CHECK16-NEXT: store i32 100, i32* [[I20]], align 4 14546 // CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* @Arg, align 4 14547 // CHECK16-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiEiT_(i32 noundef [[TMP21]]) 14548 // CHECK16-NEXT: ret i32 [[CALL]] 14549 // 14550 // 14551 // CHECK16-LABEL: define {{[^@]+}}@_Z5tmainIiEiT_ 14552 // CHECK16-SAME: (i32 noundef [[ARG:%.*]]) #[[ATTR0]] comdat { 14553 // CHECK16-NEXT: entry: 14554 // CHECK16-NEXT: [[ARG_ADDR:%.*]] = alloca i32, align 4 14555 // CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4 14556 // CHECK16-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 14557 // CHECK16-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 14558 // CHECK16-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 14559 // CHECK16-NEXT: [[I:%.*]] = alloca i32, align 4 14560 // CHECK16-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 14561 // CHECK16-NEXT: [[DOTOMP_LB3:%.*]] = alloca i32, align 4 14562 // CHECK16-NEXT: [[DOTOMP_UB4:%.*]] = alloca i32, align 4 14563 // CHECK16-NEXT: [[DOTOMP_IV5:%.*]] = alloca i32, align 4 14564 // CHECK16-NEXT: [[I6:%.*]] = alloca i32, align 4 14565 // CHECK16-NEXT: [[_TMP16:%.*]] = alloca i32, align 4 14566 // CHECK16-NEXT: [[DOTOMP_LB17:%.*]] = alloca i32, align 4 14567 // CHECK16-NEXT: [[DOTOMP_UB18:%.*]] = alloca i32, align 4 14568 // CHECK16-NEXT: [[DOTOMP_IV19:%.*]] = alloca i32, align 4 14569 // CHECK16-NEXT: [[I20:%.*]] = alloca i32, 
align 4 14570 // CHECK16-NEXT: store i32 [[ARG]], i32* [[ARG_ADDR]], align 4 14571 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 14572 // CHECK16-NEXT: store i32 99, i32* [[DOTOMP_UB]], align 4 14573 // CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 14574 // CHECK16-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4 14575 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 14576 // CHECK16: omp.inner.for.cond: 14577 // CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 14578 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !22 14579 // CHECK16-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]] 14580 // CHECK16-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 14581 // CHECK16: omp.inner.for.body: 14582 // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 14583 // CHECK16-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1 14584 // CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 14585 // CHECK16-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !22 14586 // CHECK16-NEXT: call void @_Z3fn1v(), !llvm.access.group !22 14587 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 14588 // CHECK16: omp.body.continue: 14589 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 14590 // CHECK16: omp.inner.for.inc: 14591 // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 14592 // CHECK16-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], 1 14593 // CHECK16-NEXT: store i32 [[ADD1]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !22 14594 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]] 14595 // CHECK16: omp.inner.for.end: 14596 // CHECK16-NEXT: store i32 100, i32* [[I]], align 4 14597 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB3]], align 4 14598 // CHECK16-NEXT: store i32 99, i32* [[DOTOMP_UB4]], align 4 
14599 // CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB3]], align 4 14600 // CHECK16-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV5]], align 4 14601 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND7:%.*]] 14602 // CHECK16: omp.inner.for.cond7: 14603 // CHECK16-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14604 // CHECK16-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB4]], align 4 14605 // CHECK16-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]] 14606 // CHECK16-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY9:%.*]], label [[OMP_INNER_FOR_END15:%.*]] 14607 // CHECK16: omp.inner.for.body9: 14608 // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14609 // CHECK16-NEXT: [[MUL10:%.*]] = mul nsw i32 [[TMP8]], 1 14610 // CHECK16-NEXT: [[ADD11:%.*]] = add nsw i32 0, [[MUL10]] 14611 // CHECK16-NEXT: store i32 [[ADD11]], i32* [[I6]], align 4 14612 // CHECK16-NEXT: call void @_Z3fn2v() 14613 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE12:%.*]] 14614 // CHECK16: omp.body.continue12: 14615 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC13:%.*]] 14616 // CHECK16: omp.inner.for.inc13: 14617 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV5]], align 4 14618 // CHECK16-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP9]], 1 14619 // CHECK16-NEXT: store i32 [[ADD14]], i32* [[DOTOMP_IV5]], align 4 14620 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND7]], !llvm.loop [[LOOP25:![0-9]+]] 14621 // CHECK16: omp.inner.for.end15: 14622 // CHECK16-NEXT: store i32 100, i32* [[I6]], align 4 14623 // CHECK16-NEXT: store i32 0, i32* [[DOTOMP_LB17]], align 4 14624 // CHECK16-NEXT: store i32 99, i32* [[DOTOMP_UB18]], align 4 14625 // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_LB17]], align 4 14626 // CHECK16-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_IV19]], align 4 14627 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND21:%.*]] 14628 // CHECK16: omp.inner.for.cond21: 14629 // CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, 
!llvm.access.group !26 14630 // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_UB18]], align 4, !llvm.access.group !26 14631 // CHECK16-NEXT: [[CMP22:%.*]] = icmp sle i32 [[TMP11]], [[TMP12]] 14632 // CHECK16-NEXT: br i1 [[CMP22]], label [[OMP_INNER_FOR_BODY23:%.*]], label [[OMP_INNER_FOR_END29:%.*]] 14633 // CHECK16: omp.inner.for.body23: 14634 // CHECK16-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !26 14635 // CHECK16-NEXT: [[MUL24:%.*]] = mul nsw i32 [[TMP13]], 1 14636 // CHECK16-NEXT: [[ADD25:%.*]] = add nsw i32 0, [[MUL24]] 14637 // CHECK16-NEXT: store i32 [[ADD25]], i32* [[I20]], align 4, !llvm.access.group !26 14638 // CHECK16-NEXT: call void @_Z3fn3v(), !llvm.access.group !26 14639 // CHECK16-NEXT: br label [[OMP_BODY_CONTINUE26:%.*]] 14640 // CHECK16: omp.body.continue26: 14641 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC27:%.*]] 14642 // CHECK16: omp.inner.for.inc27: 14643 // CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !26 14644 // CHECK16-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP14]], 1 14645 // CHECK16-NEXT: store i32 [[ADD28]], i32* [[DOTOMP_IV19]], align 4, !llvm.access.group !26 14646 // CHECK16-NEXT: br label [[OMP_INNER_FOR_COND21]], !llvm.loop [[LOOP27:![0-9]+]] 14647 // CHECK16: omp.inner.for.end29: 14648 // CHECK16-NEXT: store i32 100, i32* [[I20]], align 4 14649 // CHECK16-NEXT: ret i32 0 14650 // 14651