// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4

// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK6
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK7
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DCHECK -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK8

// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10

// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK11
// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template <typename T>
T tmain() {
  T t_var = T();
  T vec[] = {1, 2};
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(+: t_var)
  for (int i = 0; i < 2; ++i) {
    t_var += (T) i;
  }
  return T();
}

int main() {
  static int sivar;
#ifdef LAMBDA
  [&]() {
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(+: sivar)
  for (int i = 0; i < 2; ++i) {

    // Skip global and bound tid vars


    sivar += i;

    [&]() {

      sivar += 4;

    }();
  }
  }();
  return 0;
#else
#pragma omp target
#pragma omp teams
#pragma omp distribute simd reduction(+: sivar)
  for (int i = 0; i < 2; ++i) {
    sivar += i;
  }
  return tmain<int>();
#endif
}



// Skip global and bound tid vars




// Skip global and bound tid vars


#endif
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SIVAR_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[SIVAR_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[SIVAR_CASTED]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i64*
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i64*
// CHECK1-NEXT: store i64 [[TMP1]], i64* [[TMP5]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 2)
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
// CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK1-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64(i64 [[TMP1]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
// CHECK1-NEXT: ret i32 [[CALL]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64
// CHECK1-SAME: (i64 noundef [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[SIVAR]], i64* [[SIVAR_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[SIVAR_ADDR]] to i32*
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: store i32 0, i32* [[SIVAR1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !5
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 2, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK1-SAME: () #[[ATTR3:[0-9]+]] comdat {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK1-NEXT: [[T_VAR_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP7]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 2)
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
// CHECK1-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK1-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i64 [[TMP2]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
// CHECK1-SAME: (i64 noundef [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: store i32 0, i32* [[T_VAR1]], align 4
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[T_VAR1]], align 4, !llvm.access.group !11
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK1-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK1-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK1: .omp.final.then:
// CHECK1-NEXT: store i32 2, i32* [[I]], align 4
// CHECK1-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK1: .omp.final.done:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK1-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK1-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@main
// CHECK2-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SIVAR_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[SIVAR_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[SIVAR_CASTED]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP3]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i64*
// CHECK2-NEXT: store i64 [[TMP1]], i64* [[TMP5]], align 8
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP6]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 2)
// CHECK2-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
// CHECK2-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK2-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64(i64 [[TMP1]]) #[[ATTR2:[0-9]+]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
// CHECK2-NEXT: ret i32 [[CALL]]
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64
// CHECK2-SAME: (i64 noundef [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[SIVAR]], i64* [[SIVAR_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[SIVAR_ADDR]] to i32*
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: store i32 0, i32* [[SIVAR1]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !5
// CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !5
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !5
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !5
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK2-NEXT: store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !5
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK2-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !5
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP6:![0-9]+]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK2-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK2: .omp.final.then:
// CHECK2-NEXT: store i32 2, i32* [[I]], align 4
// CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK2: .omp.final.done:
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK2-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK2-SAME: () #[[ATTR3:[0-9]+]] comdat {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK2-NEXT: [[T_VAR_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP4]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i64*
// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP6]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK2-NEXT: store i8* null, i8** [[TMP7]], align 8
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 2)
// CHECK2-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
// CHECK2-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK2-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK2: omp_offload.failed:
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i64 [[TMP2]]) #[[ATTR2]]
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK2: omp_offload.cont:
// CHECK2-NEXT: ret i32 0
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
// CHECK2-SAME: (i64 noundef [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[T_VAR_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: store i32 0, i32* [[T_VAR1]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !11
// CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !11
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !11
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !11
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK2-NEXT: store i32 [[ADD3]], i32* [[T_VAR1]], align 4, !llvm.access.group !11
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK2-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !11
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK2-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK2: .omp.final.then:
// CHECK2-NEXT: store i32 2, i32* [[I]], align 4
// CHECK2-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK2: .omp.final.done:
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK2-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK2-NEXT: ret void
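// A reader's note, not an autogenerated assertion: the .omp_outlined. bodies
// checked above implement the reduction(+: ...) clause by privatizing the
// reduced variable per team ([[SIVAR1]]/[[T_VAR1]], zero-initialized because
// 0 is the identity of '+'), accumulating only into that private copy inside
// the simd loop, and folding it back into the original variable once, after
// __kmpc_for_static_fini. A minimal serial sketch of that contract (the
// variable name 'priv' is illustrative, not from the generated IR):
//
//   int priv = 0;               // private copy, initialized to '+' identity
//   for (int i = 0; i < 2; ++i)
//     priv += i;                // loop body updates only the private copy
//   sivar += priv;              // single combine into the original variable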
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK2-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK2-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@main
// CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[SIVAR_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK3-NEXT: store i32 [[TMP0]], i32* [[SIVAR_CASTED]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIVAR_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP3]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i32*
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP5]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 2)
// CHECK3-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
// CHECK3-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK3-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64(i32 [[TMP1]]) #[[ATTR2:[0-9]+]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK3-NEXT: ret i32 [[CALL]]
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64
// CHECK3-SAME: (i32 noundef [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[SIVAR]], i32* [[SIVAR_ADDR]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[SIVAR_ADDR]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: store i32 0, i32* [[SIVAR1]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !6
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i32 2, i32* [[I]], align 4
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK3-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK3-SAME: () #[[ATTR3:[0-9]+]] comdat {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK3-NEXT: [[T_VAR_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[T_VAR_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32*
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP7]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 2)
// CHECK3-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
// CHECK3-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK3-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32 [[TMP2]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret i32 0
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
// CHECK3-SAME: (i32 noundef [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[T_VAR_ADDR]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: store i32 0, i32* [[T_VAR1]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[T_VAR1]], align 4, !llvm.access.group !12
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK3-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK3-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK3: .omp.final.then:
// CHECK3-NEXT: store i32 2, i32* [[I]], align 4
// CHECK3-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK3: .omp.final.done:
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK3-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK3-NEXT: ret void
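// A reader's note, not an autogenerated assertion: the host-side 'main' and
// '_Z5tmainIiET_v' sequences above follow the usual offload-launch pattern:
// the scalar is passed by value through a *_CASTED slot, the base-pointer,
// pointer, and mapper arrays describe the single captured argument,
// __kmpc_push_target_tripcount_mapper announces the trip count (2), and a
// nonzero return from __tgt_target_teams_mapper routes to the host fallback
// entry (omp_offload.failed). Roughly, in C++ terms:
//
//   if (__tgt_target_teams_mapper(/* ... launch args ... */) != 0)
//     __omp_offloading_..._main_l64(sivar);  // device launch failed: run host copy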
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK3-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK3-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@main
// CHECK4-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[SIVAR_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK4-NEXT: store i32 [[TMP0]], i32* [[SIVAR_CASTED]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIVAR_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP3]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP5:%.*]] = bitcast i8** [[TMP4]] to i32*
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP5]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP6]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 2)
// CHECK4-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
// CHECK4-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0
// CHECK4-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64(i32 [[TMP1]]) #[[ATTR2:[0-9]+]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK4-NEXT: ret i32 [[CALL]]
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l64
// CHECK4-SAME: (i32 noundef [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[SIVAR]], i32* [[SIVAR_ADDR]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[SIVAR_ADDR]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: store i32 0, i32* [[SIVAR1]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
// CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !6
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK4-NEXT: store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !6
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK4-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK4: .omp.final.then:
// CHECK4-NEXT: store i32 2, i32* [[I]], align 4
// CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK4: .omp.final.done:
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK4-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK4-SAME: () #[[ATTR3:[0-9]+]] comdat {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK4-NEXT: [[T_VAR_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK4-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], i32* [[T_VAR_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[T_VAR_CASTED]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to i32*
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[TMP4]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32*
// CHECK4-NEXT: store i32 [[TMP2]], i32* [[TMP6]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK4-NEXT: store i8* null, i8** [[TMP7]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 2)
// CHECK4-NEXT: [[TMP10:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, i32 1, i8** [[TMP8]], i8** [[TMP9]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1)
// CHECK4-NEXT: [[TMP11:%.*]] = icmp ne i32 [[TMP10]], 0
// CHECK4-NEXT: br i1 [[TMP11]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK4: omp_offload.failed:
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(i32 [[TMP2]]) #[[ATTR2]]
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK4: omp_offload.cont:
// CHECK4-NEXT: ret i32 0
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
// CHECK4-SAME: (i32 noundef [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[T_VAR]], i32* [[T_VAR_ADDR]], align 4
// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32* [[T_VAR_ADDR]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 4
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: store i32 0, i32* [[T_VAR1]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK4-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !12
// CHECK4-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK4-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !12
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !12
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !12
// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK4-NEXT: store i32 [[ADD3]], i32* [[T_VAR1]], align 4, !llvm.access.group !12
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
// CHECK4-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !12
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = icmp ne i32 [[TMP12]], 0
// CHECK4-NEXT: br i1 [[TMP13]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK4: .omp.final.then:
// CHECK4-NEXT: store i32 2, i32* [[I]], align 4
// CHECK4-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK4: .omp.final.done:
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
// CHECK4-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK4-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK4-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@main
// CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: store i32 0, i32* [[SIVAR]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !2
// CHECK5-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
// CHECK5-NEXT: store i32 [[ADD1]], i32* [[SIVAR]], align 4, !llvm.access.group !2
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK5-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: store i32 2, i32* [[I]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
// CHECK5-NEXT: store i32 [[ADD3]], i32* @_ZZ4mainE5sivar, align 4
// CHECK5-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
// CHECK5-NEXT: ret i32 [[CALL]]
//
//
// CHECK5-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK5-SAME: () #[[ATTR1:[0-9]+]] comdat {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK5-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK5-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK5-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK5-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK5-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_IV]], align 4
// CHECK5-NEXT: store i32 0, i32* [[T_VAR1]], align 4
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK5: omp.inner.for.cond:
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK5-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
// CHECK5-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK5-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK5: omp.inner.for.body:
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK5-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
// CHECK5-NEXT: [[TMP5:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !6
// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]]
// CHECK5-NEXT: store i32 [[ADD2]], i32* [[T_VAR1]], align 4, !llvm.access.group !6
// CHECK5-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK5: omp.body.continue:
// CHECK5-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK5: omp.inner.for.inc:
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK5-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK5-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK5: omp.inner.for.end:
// CHECK5-NEXT: store i32 2, i32* [[I]], align 4
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK5-NEXT: store i32 [[ADD4]], i32* [[T_VAR]], align 4
// CHECK5-NEXT: ret i32 0
//
//
// CHECK6-LABEL: define {{[^@]+}}@main
// CHECK6-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: store i32 0, i32* [[SIVAR]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !2
// CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !2
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !2
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !2
// CHECK6-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
// CHECK6-NEXT: store i32 [[ADD1]], i32* [[SIVAR]], align 4, !llvm.access.group !2
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK6-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !2
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: store i32 2, i32* [[I]], align 4
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK6-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
// CHECK6-NEXT: store i32 [[ADD3]], i32* @_ZZ4mainE5sivar, align 4
// CHECK6-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
// CHECK6-NEXT: ret i32 [[CALL]]
//
//
// CHECK6-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK6-SAME: () #[[ATTR1:[0-9]+]] comdat {
// CHECK6-NEXT: entry:
// CHECK6-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK6-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK6-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK6-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK6-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
// CHECK6-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK6-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK6-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_IV]], align 4
// CHECK6-NEXT: store i32 0, i32* [[T_VAR1]], align 4
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK6: omp.inner.for.cond:
// CHECK6-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK6-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !6
// CHECK6-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK6-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK6: omp.inner.for.body:
// CHECK6-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK6-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK6-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK6-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !6
// CHECK6-NEXT: [[TMP5:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !6
// CHECK6-NEXT: [[TMP6:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !6
// CHECK6-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]]
// CHECK6-NEXT: store i32 [[ADD2]], i32* [[T_VAR1]], align 4, !llvm.access.group !6
// CHECK6-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK6: omp.body.continue:
// CHECK6-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK6: omp.inner.for.inc:
// CHECK6-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK6-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK6-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !6
// CHECK6-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
// CHECK6: omp.inner.for.end:
// CHECK6-NEXT: store i32 2, i32* [[I]], align 4
// CHECK6-NEXT: [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK6-NEXT: [[TMP9:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK6-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK6-NEXT: store i32 [[ADD4]], i32* [[T_VAR]], align 4
// CHECK6-NEXT: ret i32 0
//
//
// CHECK7-LABEL: define {{[^@]+}}@main
// CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: store i32 0, i32* [[SIVAR]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !3
// CHECK7-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
// CHECK7-NEXT: store i32 [[ADD1]], i32* [[SIVAR]], align 4, !llvm.access.group !3
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK7-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: store i32 2, i32* [[I]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
// CHECK7-NEXT: store i32 [[ADD3]], i32* @_ZZ4mainE5sivar, align 4
// CHECK7-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK7-NEXT: ret i32 [[CALL]]
//
//
// CHECK7-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK7-SAME: () #[[ATTR1:[0-9]+]] comdat {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK7-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
// CHECK7-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_IV]], align 4
// CHECK7-NEXT: store i32 0, i32* [[T_VAR1]], align 4
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK7: omp.inner.for.cond:
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7
// CHECK7-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK7-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK7: omp.inner.for.body:
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK7-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !7
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !7
// CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !7
// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]]
// CHECK7-NEXT: store i32 [[ADD2]], i32* [[T_VAR1]], align 4, !llvm.access.group !7
// CHECK7-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK7: omp.body.continue:
// CHECK7-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK7: omp.inner.for.inc:
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK7-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK7-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK7: omp.inner.for.end:
// CHECK7-NEXT: store i32 2, i32* [[I]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK7-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK7-NEXT: store i32 [[ADD4]], i32* [[T_VAR]], align 4
// CHECK7-NEXT: ret i32 0
//
//
// CHECK8-LABEL: define {{[^@]+}}@main
// CHECK8-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[SIVAR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP0]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: store i32 0, i32* [[SIVAR]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !3
// CHECK8-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[TMP2]]
// CHECK8-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !3
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !3
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[SIVAR]], align 4, !llvm.access.group !3
// CHECK8-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP5]], [[TMP4]]
// CHECK8-NEXT: store i32 [[ADD1]], i32* [[SIVAR]], align 4, !llvm.access.group !3
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
// CHECK8-NEXT: store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !3
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: store i32 2, i32* [[I]], align 4
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[SIVAR]], align 4
// CHECK8-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], [[TMP8]]
// CHECK8-NEXT: store i32 [[ADD3]], i32* @_ZZ4mainE5sivar, align 4
// CHECK8-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK8-NEXT: ret i32 [[CALL]]
//
//
// CHECK8-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK8-SAME: () #[[ATTR1:[0-9]+]] comdat {
// CHECK8-NEXT: entry:
// CHECK8-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
// CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK8-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
// CHECK8-NEXT: store i32 0, i32* [[T_VAR]], align 4
// CHECK8-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
// CHECK8-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i32 8, i1 false)
// CHECK8-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK8-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK8-NEXT: store i32 [[TMP1]], i32* [[DOTOMP_IV]], align 4
// CHECK8-NEXT: store i32 0, i32* [[T_VAR1]], align 4
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK8: omp.inner.for.cond:
// CHECK8-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK8-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !7
// CHECK8-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP2]], [[TMP3]]
// CHECK8-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK8: omp.inner.for.body:
// CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK8-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
// CHECK8-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK8-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !7
// CHECK8-NEXT: [[TMP5:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !7
// CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[T_VAR1]], align 4, !llvm.access.group !7
// CHECK8-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]]
// CHECK8-NEXT: store i32 [[ADD2]], i32* [[T_VAR1]], align 4, !llvm.access.group !7
// CHECK8-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK8: omp.body.continue:
// CHECK8-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK8: omp.inner.for.inc:
// CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK8-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK8-NEXT: store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !7
// CHECK8-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP8:![0-9]+]]
// CHECK8: omp.inner.for.end:
// CHECK8-NEXT: store i32 2, i32* [[I]], align 4
// CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[T_VAR]], align 4
// CHECK8-NEXT: [[TMP9:%.*]] = load i32, i32* [[T_VAR1]], align 4
// CHECK8-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK8-NEXT: store i32 [[ADD4]], i32* [[T_VAR]], align 4
// CHECK8-NEXT: ret i32 0
//
//
// CHECK9-LABEL: define {{[^@]+}}@main
// CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
// CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK9-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK9-NEXT: ret i32 0
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l45
// CHECK9-SAME: (i64 noundef [[SIVAR:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store i64 [[SIVAR]], i64* [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[CONV:%.*]] = bitcast i64* [[SIVAR_ADDR]] to i32*
// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK9-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: store i32 0, i32* [[SIVAR1]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK9-NEXT: store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store i32* [[SIVAR1]], i32** [[TMP11]], align 8, !llvm.access.group !4
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group !4
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK9-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK9-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK9: .omp.final.then:
// CHECK9-NEXT: store i32 2, i32* [[I]], align 4
// CHECK9-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK9: .omp.final.done:
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK9-NEXT: [[TMP16:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK9-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK9-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK9-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@main
// CHECK10-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
// CHECK10-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK10-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK10-NEXT: ret i32 0
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l45
// CHECK10-SAME: (i64 noundef [[SIVAR:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store i64 [[SIVAR]], i64* [[SIVAR_ADDR]], align 8
// CHECK10-NEXT: [[CONV:%.*]] = bitcast i64* [[SIVAR_ADDR]] to i32*
// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[CONV]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK10-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK10-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK10-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8
// CHECK10-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8
// CHECK10-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT: store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK10-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK10-NEXT: store i32 0, i32* [[SIVAR1]], align 4
// CHECK10-NEXT: [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK10-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK10-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK10-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK10: cond.true:
// CHECK10-NEXT: br label [[COND_END:%.*]]
// CHECK10: cond.false:
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: br label [[COND_END]]
// CHECK10: cond.end:
// CHECK10-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK10-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK10-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK10-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_IV]], align 4
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK10: omp.inner.for.cond:
// CHECK10-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK10-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4, !llvm.access.group !4
// CHECK10-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK10-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK10: omp.inner.for.body:
// CHECK10-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK10-NEXT: store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group !4
// CHECK10-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4, !llvm.access.group !4
// CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIVAR1]], align 4, !llvm.access.group !4
// CHECK10-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK10-NEXT: store i32 [[ADD3]], i32* [[SIVAR1]], align 4, !llvm.access.group !4
// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK10-NEXT: store i32* [[SIVAR1]], i32** [[TMP11]], align 8, !llvm.access.group !4
// CHECK10-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[REF_TMP]]), !llvm.access.group !4
// CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK10: omp.body.continue:
// CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK10: omp.inner.for.inc:
// CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK10-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK10-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group !4
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK10: omp.inner.for.end:
// CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK10: omp.loop.exit:
// CHECK10-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
// CHECK10-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK10-NEXT: [[TMP14:%.*]] = icmp ne i32 [[TMP13]], 0
// CHECK10-NEXT: br i1 [[TMP14]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
// CHECK10: .omp.final.then:
// CHECK10-NEXT: store i32 2, i32* [[I]], align 4
// CHECK10-NEXT: br label [[DOTOMP_FINAL_DONE]]
// CHECK10: .omp.final.done:
// CHECK10-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK10-NEXT: [[TMP16:%.*]] = load i32, i32* [[SIVAR1]], align 4
// CHECK10-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK10-NEXT: store i32 [[ADD5]], i32* [[TMP0]], align 4
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK10-SAME: () #[[ATTR4:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK10-NEXT: ret void
//
//
// CHECK11-LABEL: define {{[^@]+}}@main
// CHECK11-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK11-NEXT: entry:
// CHECK11-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK11-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
// CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK11-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK11-NEXT: ret i32 0
//
//
// CHECK12-LABEL: define {{[^@]+}}@main
// CHECK12-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
// CHECK12-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK12-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK12-NEXT: ret i32 0
//