// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-cuda-mode -x c++ \
// RUN: -triple powerpc64le-unknown-unknown -DCUDA \
// RUN: -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o \
// RUN: %t-ppc-host.bc

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-cuda-mode -x c++ \
// RUN: -triple nvptx64-unknown-unknown -DCUDA \
// RUN: -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s \
// RUN: -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc \
// RUN: -o - | FileCheck %s --check-prefix CHECK

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ \
// RUN: -triple powerpc64le-unknown-unknown -DDIAG \
// RUN: -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm \
// RUN: %s -o - | FileCheck %s \
// RUN: --check-prefix=CHECK1

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ \
// RUN: -triple i386-unknown-unknown \
// RUN: -fopenmp-targets=i386-pc-linux-gnu -emit-llvm \
// RUN: %s -o - | FileCheck %s \
// RUN: --check-prefix=CHECK2


#if defined(CUDA)
// expected-no-diagnostics

int foo(int n) {
  double *e;
  // no error and no implicit map generated for e[:1]
  #pragma omp target parallel reduction(+: e[:1])
  *e=10;
  ;
  return 0;
}
// CHECK-NOT: @.offload_maptypes
#elif defined(DIAG)
class S2 {
  mutable int a;
public:
  S2():a(0) { }
  S2(S2 &s2):a(s2.a) { }
  S2 &operator +(S2 &s);
};
int bar() {
  S2 o[5];
  // warning "copyable and not guaranteed to be mapped correctly" and an
  // implicit map is generated.
#pragma omp target parallel reduction(+:o[0]) //expected-warning {{Type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}}
  for (int i = 0; i < 10; i++);
  double b[10][10][10];
  // no error and no implicit map generated for the reduction item; the map
  // for b is generated, but not for the section b[0:2][2:4][1].
#pragma omp target parallel for reduction(task, +: b[0:2][2:4][1])
  for (long long i = 0; i < 10; ++i);
  return 0;
}
// map for variable o
// map for b:
#else
// expected-no-diagnostics

// Generate an implicit map for array elements or array sections in the
// reduction clause. In the following case, the implicit map is generated for
// output[0] with map size 4 (1 x sizeof(int)) and for output[:3] with map size 12 (3 x sizeof(int)).
void sum(int* input, int size, int* output)
{
#pragma omp target teams distribute parallel for reduction(+: output[0]) \
            map(to: input [0:size])
  for (int i = 0; i < size; i++)
    output[0] += input[i];
#pragma omp target teams distribute parallel for reduction(+: output[:3]) \
            map(to: input [0:size])
  for (int i = 0; i < size; i++)
    output[0] += input[i];
  int a[10];
#pragma omp target parallel reduction(+: a[:2])
  for (int i = 0; i < size; i++)
    ;
#pragma omp target parallel reduction(+: a[3])
  for (int i = 0; i < size; i++)
    ;
}
#endif
int main()
{
#if defined(CUDA)
  int a = foo(10);
#elif defined(DIAG)
  int a = bar();
#else
  const int size = 100;
  int *array = new int[size];
  int result = 0;
  sum(array, size, &result);
#endif
  return 0;
}
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l32
// CHECK-SAME: (double* noundef [[E:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[E_ADDR:%.*]] = alloca double*, align 8
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK-NEXT: store double* [[E]], double** [[E_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 2, i1 false, i1 true)
// CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK: user_code.entry:
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK-NEXT: [[TMP2:%.*]] = load double*, double** [[E_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK-NEXT: [[TMP4:%.*]] = bitcast double* [[TMP2]] to i8*
// CHECK-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, double*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP5]], i64 1)
// CHECK-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK-NEXT: ret void
// CHECK: worker.exit:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], double* noundef [[E:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[E_ADDR:%.*]] = alloca double*, align 8
// CHECK-NEXT: [[E2:%.*]] = alloca double, align 8
// CHECK-NEXT: [[TMP:%.*]] = alloca double*, align 8
// CHECK-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store double* [[E]], double** [[E_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double*, double** [[E_ADDR]], align 8
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP0]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[E_ADDR]], align 8
// CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[TMP1]], i64 0
// CHECK-NEXT: store double 0.000000e+00, double* [[E2]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load double*, double** [[E_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = ptrtoint double* [[TMP2]] to i64
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint double* [[ARRAYIDX]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]]
// CHECK-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr double, double* [[E2]], i64 [[TMP6]]
// CHECK-NEXT: store double* [[TMP7]], double** [[TMP]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load double*, double** [[TMP]], align 8
// CHECK-NEXT: store double 1.000000e+01, double* [[TMP8]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK-NEXT: [[TMP12:%.*]] = bitcast double* [[E2]] to i8*
// CHECK-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(%struct.ident_t* @[[GLOB2]], i32 [[TMP10]], i32 1, i64 8, i8* [[TMP13]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func)
// CHECK-NEXT: [[TMP15:%.*]] = icmp eq i32 [[TMP14]], 1
// CHECK-NEXT: br i1 [[TMP15]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK: .omp.reduction.then:
// CHECK-NEXT: [[TMP16:%.*]] = load double, double* [[ARRAYIDX]], align 8
// CHECK-NEXT: [[TMP17:%.*]] = load double, double* [[E2]], align 8
// CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP16]], [[TMP17]]
// CHECK-NEXT: store double [[ADD]], double* [[ARRAYIDX]], align 8
// CHECK-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP10]])
// CHECK-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK: .omp.reduction.done:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
// CHECK-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
// CHECK-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to double**
// CHECK-NEXT: [[TMP11:%.*]] = load double*, double** [[TMP10]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr double, double* [[TMP11]], i64 1
// CHECK-NEXT: [[TMP14:%.*]] = bitcast double* [[TMP13]] to i8*
// CHECK-NEXT: [[TMP15:%.*]] = bitcast double* [[TMP11]] to i64*
// CHECK-NEXT: [[TMP16:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i64*
// CHECK-NEXT: [[TMP17:%.*]] = load i64, i64* [[TMP15]], align 8
// CHECK-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-NEXT: [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
// CHECK-NEXT: [[TMP20:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP17]], i16 [[TMP7]], i16 [[TMP19]])
// CHECK-NEXT: store i64 [[TMP20]], i64* [[TMP16]], align 8
// CHECK-NEXT: [[TMP21:%.*]] = getelementptr i64, i64* [[TMP15]], i64 1
// CHECK-NEXT: [[TMP22:%.*]] = getelementptr i64, i64* [[TMP16]], i64 1
// CHECK-NEXT: [[TMP23:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i8*
// CHECK-NEXT: store i8* [[TMP23]], i8** [[TMP12]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK-NEXT: [[TMP25:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK-NEXT: [[TMP26:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
// CHECK-NEXT: [[TMP28:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK-NEXT: [[TMP29:%.*]] = and i16 [[TMP6]], 1
// CHECK-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP29]], 0
// CHECK-NEXT: [[TMP31:%.*]] = and i1 [[TMP28]], [[TMP30]]
// CHECK-NEXT: [[TMP32:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
// CHECK-NEXT: [[TMP34:%.*]] = or i1 [[TMP24]], [[TMP27]]
// CHECK-NEXT: [[TMP35:%.*]] = or i1 [[TMP34]], [[TMP33]]
// CHECK-NEXT: br i1 [[TMP35]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK: then:
// CHECK-NEXT: [[TMP36:%.*]] = bitcast [1 x i8*]* [[TMP5]] to i8*
// CHECK-NEXT: [[TMP37:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP36]], i8* [[TMP37]]) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: br label [[IFCONT:%.*]]
// CHECK: else:
// CHECK-NEXT: br label [[IFCONT]]
// CHECK: ifcont:
// CHECK-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK-NEXT: [[TMP39:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]]
// CHECK-NEXT: br i1 [[TMP40]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
// CHECK: then4:
// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to double**
// CHECK-NEXT: [[TMP43:%.*]] = load double*, double** [[TMP42]], align 8
// CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to double**
// CHECK-NEXT: [[TMP46:%.*]] = load double*, double** [[TMP45]], align 8
// CHECK-NEXT: [[TMP47:%.*]] = load double, double* [[TMP43]], align 8
// CHECK-NEXT: store double [[TMP47]], double* [[TMP46]], align 8
// CHECK-NEXT: br label [[IFCONT6:%.*]]
// CHECK: else5:
// CHECK-NEXT: br label [[IFCONT6]]
// CHECK: ifcont6:
// CHECK-NEXT: ret void
//
//
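// Note (editorial comment, not a generated assertion): the two helpers here
// implement, in outline, the device-side reduction driven by
// __kmpc_nvptx_parallel_reduce_nowait_v2 above. _omp_reduction_shuffle_and_reduce_func
// pulls a remote lane's copy of the reduction element over with
// __kmpc_shuffle_int64 and conditionally folds it into the local copy, while
// _omp_reduction_inter_warp_copy_func (next) has lane 0 of each warp stage its
// warp's partial result through the shared buffer
// __openmp_nvptx_data_transfer_temporary_storage so the first warp can finish
// combining the per-warp results.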
// CHECK-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
// CHECK-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [1 x i8*]*
// CHECK-NEXT: store i32 0, i32* [[DOTCNT_ADDR]], align 4
// CHECK-NEXT: br label [[PRECOND:%.*]]
// CHECK: precond:
// CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCNT_ADDR]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 2
// CHECK-NEXT: br i1 [[TMP9]], label [[BODY:%.*]], label [[EXIT:%.*]]
// CHECK: body:
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
// CHECK-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK: then:
// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP10]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32*
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP12]], i32 [[TMP8]]
// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK-NEXT: store volatile i32 [[TMP15]], i32 addrspace(3)* [[TMP14]], align 4
// CHECK-NEXT: br label [[IFCONT:%.*]]
// CHECK: else:
// CHECK-NEXT: br label [[IFCONT]]
// CHECK: ifcont:
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]])
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP16]]
// CHECK-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK: then2:
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32*
// CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP20]], i32 [[TMP8]]
// CHECK-NEXT: [[TMP22:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP17]], align 4
// CHECK-NEXT: store i32 [[TMP22]], i32* [[TMP21]], align 4
// CHECK-NEXT: br label [[IFCONT4:%.*]]
// CHECK: else3:
// CHECK-NEXT: br label [[IFCONT4]]
// CHECK: ifcont4:
// CHECK-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK-NEXT: store i32 [[TMP23]], i32* [[DOTCNT_ADDR]], align 4
// CHECK-NEXT: br label [[PRECOND]]
// CHECK: exit:
// CHECK-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z3barv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[O:%.*]] = alloca [5 x %class.S2], align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[B:%.*]] = alloca [10 x [10 x [10 x double]]], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[O]], i32 0, i32 0
// CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[ARRAY_BEGIN]], i64 5
// CHECK1-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
// CHECK1: arrayctor.loop:
// CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %class.S2* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
// CHECK1-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
// CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S2]], %class.S2* [[ARRAYCTOR_CUR]], i64 1
// CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S2* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
// CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
// CHECK1: arrayctor.cont:
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[O]], i64 0, i64 0
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [5 x %class.S2]**
// CHECK1-NEXT: store [5 x %class.S2]* [[O]], [5 x %class.S2]** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to %class.S2**
// CHECK1-NEXT: store %class.S2* [[ARRAYIDX]], %class.S2** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0
// CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50([5 x %class.S2]* [[O]]) #[[ATTR8:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x [10 x [10 x double]]]**
// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x [10 x [10 x double]]]**
// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.3, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0)
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]]
// CHECK1: omp_offload.failed4:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55([10 x [10 x [10 x double]]]* [[B]]) #[[ATTR8]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT5]]
// CHECK1: omp_offload.cont5:
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN2S2C1Ev
// CHECK1-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8
// CHECK1-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN2S2C2Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50
// CHECK1-SAME: ([5 x %class.S2]* noundef nonnull align 4 dereferenceable(20) [[O:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[O_ADDR:%.*]] = alloca [5 x %class.S2]*, align 8
// CHECK1-NEXT: store [5 x %class.S2]* [[O]], [5 x %class.S2]** [[O_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [5 x %class.S2]*, [5 x %class.S2]** [[O_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [5 x %class.S2]*)* @.omp_outlined. to void (i32*, i32*, ...)*), [5 x %class.S2]* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [5 x %class.S2]* noundef nonnull align 4 dereferenceable(20) [[O:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[O_ADDR:%.*]] = alloca [5 x %class.S2]*, align 8
// CHECK1-NEXT: [[O1:%.*]] = alloca [[CLASS_S2:%.*]], align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store [5 x %class.S2]* [[O]], [5 x %class.S2]** [[O_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [5 x %class.S2]*, [5 x %class.S2]** [[O_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[TMP0]], i64 0, i64 0
// CHECK1-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]])
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast [5 x %class.S2]* [[TMP0]] to %class.S2*
// CHECK1-NEXT: [[TMP2:%.*]] = ptrtoint %class.S2* [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = ptrtoint %class.S2* [[ARRAYIDX]] to i64
// CHECK1-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]]
// CHECK1-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (%class.S2* getelementptr ([[CLASS_S2]], %class.S2* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr [[CLASS_S2]], %class.S2* [[O1]], i64 [[TMP5]]
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast %class.S2* [[TMP6]] to [5 x %class.S2]*
// CHECK1-NEXT: store i32 0, i32* [[I]], align 4
// CHECK1-NEXT: br label [[FOR_COND:%.*]]
// CHECK1: for.cond:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], 10
// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK1: for.body:
// CHECK1-NEXT: br label [[FOR_INC:%.*]]
// CHECK1: for.inc:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[I]], align 4
// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK1: for.end:
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %class.S2* [[O1]] to i8*
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP13]], i32 1, i64 8, i8* [[TMP14]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYIDX]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]])
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast %class.S2* [[ARRAYIDX]] to i8*
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast %class.S2* [[CALL]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP13]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP19]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[CALL2:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYIDX]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]])
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast %class.S2* [[ARRAYIDX]] to i8*
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast %class.S2* [[CALL2]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP20]], i8* align 4 [[TMP21]], i64 4, i1 false)
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP19]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
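// Note (editorial comment, not a generated assertion): .omp.reduction.reduction_func
// below is the combiner that __kmpc_reduce_nowait invokes with two reduce
// lists: it loads the S2 element from each list, applies the user-defined
// operator+ via _ZN2S2plERS_, and memcpy's the result back into the LHS
// element, which is how a non-trivially-copyable reduction item is combined
// on the host.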
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %class.S2*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %class.S2*
// CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull align 4 dereferenceable(4) [[TMP11]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[TMP8]])
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast %class.S2* [[TMP11]] to i8*
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast %class.S2* [[CALL]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i64 4, i1 false)
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55
// CHECK1-SAME: ([10 x [10 x [10 x double]]]* noundef nonnull align 8 dereferenceable(8000) [[B:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x [10 x [10 x double]]]*, align 8
// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x [10 x [10 x double]]]*, [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x [10 x [10 x double]]]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), [10 x [10 x [10 x double]]]* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x [10 x [10 x double]]]* noundef nonnull align 8 dereferenceable(8000) [[B:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x [10 x [10 x double]]]*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [1 x %struct.kmp_taskred_input_t], align 8
// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca double, align 8
// CHECK1-NEXT: [[_TMP30:%.*]] = alloca double, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x [10 x [10 x double]]]*, [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 9, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY]], i64 2
// CHECK1-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX1]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY2]], i64 1
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 1
// CHECK1-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX4]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY5]], i64 5
// CHECK1-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX6]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY7]], i64 1
// CHECK1-NEXT: [[TMP1:%.*]] = ptrtoint double* [[ARRAYIDX8]] to i64
// CHECK1-NEXT: [[TMP2:%.*]] = ptrtoint double* [[ARRAYIDX3]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = sub i64 [[TMP1]], [[TMP2]]
// CHECK1-NEXT: [[TMP4:%.*]] = sdiv exact i64 [[TMP3]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP5:%.*]] = add nuw i64 [[TMP4]], 1
// CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP7:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP7]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr double, double* [[VLA]], i64 [[TMP5]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq double* [[VLA]], [[TMP8]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store double 0.000000e+00, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast [10 x [10 x [10 x double]]]* [[TMP0]] to double*
// CHECK1-NEXT: [[TMP10:%.*]] = ptrtoint double* [[TMP9]] to i64
// CHECK1-NEXT: [[TMP11:%.*]] = ptrtoint double* [[ARRAYIDX3]] to i64
// CHECK1-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr double, double* [[VLA]], i64 [[TMP13]]
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast double* [[TMP14]] to [10 x [10 x [10 x double]]]*
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [1 x %struct.kmp_taskred_input_t], [1 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYDECAY10:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX9]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY10]], i64 2
// CHECK1-NEXT: [[ARRAYDECAY12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY12]], i64 1
// CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 1
// CHECK1-NEXT: [[ARRAYDECAY15:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX14]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY15]], i64 5
// CHECK1-NEXT: [[ARRAYDECAY17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY17]], i64 1
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast double* [[VLA]] to i8*
// CHECK1-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast double* [[ARRAYIDX13]] to i8*
// CHECK1-NEXT: store i8* [[TMP19]], i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = ptrtoint double* [[ARRAYIDX18]] to i64
// CHECK1-NEXT: [[TMP21:%.*]] = ptrtoint double* [[ARRAYIDX13]] to i64
// CHECK1-NEXT: [[TMP22:%.*]] = sub i64 [[TMP20]], [[TMP21]]
// CHECK1-NEXT: [[TMP23:%.*]] = sdiv exact i64 [[TMP22]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP24:%.*]] = add nuw i64 [[TMP23]], 1
// CHECK1-NEXT: [[TMP25:%.*]] = mul nuw i64 [[TMP24]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP25]], i64* [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
// CHECK1-NEXT: [[TMP33:%.*]] = bitcast [1 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP34:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB2]], i32 [[TMP32]], i32 1, i32 1, i8* [[TMP33]])
// CHECK1-NEXT: store i8* [[TMP34]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP36]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP37:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP37]], 9
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP38:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP38]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP39]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP40:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP41:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP19:%.*]] = icmp sle i64 [[TMP40]], [[TMP41]]
// CHECK1-NEXT: br i1 [[CMP19]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1: omp.inner.for.cond.cleanup:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP42]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]]
// CHECK1-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP43]], 1
// CHECK1-NEXT: store i64 [[ADD20]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP44:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP45:%.*]] = load i32, i32* [[TMP44]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP45]])
// CHECK1-NEXT: [[TMP46:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[TMP46]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP47]], i32 1)
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP49:%.*]] = bitcast double* [[VLA]] to i8*
// CHECK1-NEXT: store i8* [[TMP49]], i8** [[TMP48]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP51:%.*]] = inttoptr i64 [[TMP5]] to i8*
// CHECK1-NEXT: store i8* [[TMP51]], i8** [[TMP50]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = load i32, i32* [[TMP52]], align 4
// CHECK1-NEXT: [[TMP54:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP55:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP53]], i32 1, i64 16, i8* [[TMP54]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP55]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr double, double* [[ARRAYIDX3]], i64 [[TMP5]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[ARRAYIDX3]], [[TMP56]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE25:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST21:%.*]] = phi double* [ [[ARRAYIDX3]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT23:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP57:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8
// CHECK1-NEXT: [[ADD22:%.*]] = fadd double [[TMP57]], [[TMP58]]
// CHECK1-NEXT: store double [[ADD22]], double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], align 8
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT23]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE24:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT23]], [[TMP56]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE24]], label [[OMP_ARRAYCPY_DONE25]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done25:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP53]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr double, double* [[ARRAYIDX3]], i64 [[TMP5]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY26:%.*]] = icmp eq double* [[ARRAYIDX3]], [[TMP59]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY26]], label [[OMP_ARRAYCPY_DONE35:%.*]], label [[OMP_ARRAYCPY_BODY27:%.*]]
// CHECK1: omp.arraycpy.body27:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST28:%.*]] = phi double* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT33:%.*]], [[ATOMIC_EXIT:%.*]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST29:%.*]] = phi double* [ [[ARRAYIDX3]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT32:%.*]], [[ATOMIC_EXIT]] ]
// CHECK1-NEXT: [[TMP60:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST28]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = bitcast double* [[OMP_ARRAYCPY_DESTELEMENTPAST29]] to i64*
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i64, i64* [[TMP61]] monotonic, align 8
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP62:%.*]] = phi i64 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY27]] ], [ [[TMP70:%.*]], [[ATOMIC_CONT]] ]
// CHECK1-NEXT: [[TMP63:%.*]] = bitcast double* [[ATOMIC_TEMP]] to i64*
// CHECK1-NEXT: [[TMP64:%.*]] = bitcast i64 [[TMP62]] to double
// CHECK1-NEXT: store double [[TMP64]], double* [[_TMP30]], align 8
// CHECK1-NEXT: [[TMP65:%.*]] = load double, double* [[_TMP30]], align 8
// CHECK1-NEXT: [[TMP66:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST28]], align 8
// CHECK1-NEXT: [[ADD31:%.*]] = fadd double [[TMP65]], [[TMP66]]
// CHECK1-NEXT: store double [[ADD31]], double* [[ATOMIC_TEMP]], align 8
// CHECK1-NEXT: [[TMP67:%.*]] = load i64, i64* [[TMP63]], align 8
// CHECK1-NEXT: [[TMP68:%.*]] = bitcast double* [[OMP_ARRAYCPY_DESTELEMENTPAST29]] to i64*
// CHECK1-NEXT: [[TMP69:%.*]] = cmpxchg i64* [[TMP68]], i64 [[TMP62]], i64 [[TMP67]] monotonic monotonic, align 8
// CHECK1-NEXT: [[TMP70]] = extractvalue { i64, i1 } [[TMP69]], 0
// CHECK1-NEXT: [[TMP71:%.*]] = extractvalue { i64, i1 } [[TMP69]], 1
// CHECK1-NEXT: br i1 [[TMP71]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT32]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST29]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT33]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST28]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE34:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT32]], [[TMP59]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE34]], label [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_BODY27]]
// CHECK1: omp.arraycpy.done35:
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: [[TMP72:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP72]])
// CHECK1-NEXT: ret void
//
//
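// Note (editorial comment, not a generated assertion): for the
// reduction(task, ...) modifier the runtime needs callbacks to lazily
// initialize and combine per-task copies of the reduction buffer. .red_init.
// (below) zero-fills a buffer whose element count comes from the
// runtime-recorded reduction size, and .red_comb. adds a source buffer into a
// destination buffer element by element. Both are registered through the
// kmp_taskred_input_t record filled in above and passed to
// __kmpc_taskred_modifier_init.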
// CHECK1-LABEL: define {{[^@]+}}@.red_init.
// CHECK1-SAME: (i8* noalias noundef [[TMP0:%.*]], i8* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to double**
// CHECK1-NEXT: [[TMP3:%.*]] = load double*, double** [[TMP2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* @{{reduction_size[.].+[.]}}, align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr double, double* [[TMP3]], i64 [[TMP4]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq double* [[TMP3]], [[TMP5]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store double 0.000000e+00, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP5]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.red_comb.
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* @{{reduction_size[.].+[.]}}, align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[DOTADDR]] to double**
// CHECK1-NEXT: [[TMP4:%.*]] = load double*, double** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8** [[DOTADDR1]] to double**
// CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr double, double* [[TMP4]], i64 [[TMP2]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[TMP4]], [[TMP7]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[TMP6]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP8:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[TMP8]], [[TMP9]]
// CHECK1-NEXT: store double [[ADD]], double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP7]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done2:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double*
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint i8* [[TMP13]] to i64
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr double, double* [[TMP11]], i64 [[TMP14]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[TMP11]], [[TMP15]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP16:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[TMP16]], [[TMP17]]
// CHECK1-NEXT: store double [[ADD]], double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP15]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done2:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN2S2C2Ev
// CHECK1-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8
// CHECK1-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT: store i32 0, i32* [[A]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: () #[[ATTR10:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3barv()
// CHECK1-NEXT: store i32 [[CALL]], i32* [[A]], align 4
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK1-SAME: () #[[ATTR11:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z3sumPiiS_
// CHECK2-SAME: (i32* noundef [[INPUT:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SIZE_CASTED4:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [3 x i8*], align 4
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [3 x i8*], align 4
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [3 x i8*], align 4
// CHECK2-NEXT: [[DOTOFFLOAD_SIZES10:%.*]] = alloca [3 x i64], align 4
// CHECK2-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_13:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A:%.*]] = alloca [10 x i32], align 4
// CHECK2-NEXT: [[SIZE_CASTED20:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: [[SIZE_CASTED27:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [2 x i8*], align 4
// CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i32 0
// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = mul nuw i32 [[TMP8]], 4
// CHECK2-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES]] to i8*
// CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP11]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes to i8*), i32 24, i1 false)
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32*
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[TMP13]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK2-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32*
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[TMP15]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK2-NEXT: store i8* null, i8** [[TMP16]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32**
// CHECK2-NEXT: store i32* [[TMP4]], i32** [[TMP18]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32**
// CHECK2-NEXT: store i32* [[ARRAYIDX]], i32** [[TMP20]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32**
// CHECK2-NEXT: store i32* [[TMP6]], i32** [[TMP23]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32**
// CHECK2-NEXT: store i32* [[ARRAYIDX1]], i32** [[TMP25]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
// CHECK2-NEXT: store i64 [[TMP10]], i64* [[TMP26]], align 4
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 4
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]*
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 904 // CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 905 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 906 // CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 907 // CHECK2-NEXT: store i32 [[TMP31]], i32* [[DOTCAPTURE_EXPR_]], align 4 908 // CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 909 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP32]], 0 910 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 911 // CHECK2-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 912 // CHECK2-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 913 // CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 914 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP33]], 1 915 // CHECK2-NEXT: [[TMP34:%.*]] = zext i32 [[ADD]] to i64 916 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP34]]) 917 // CHECK2-NEXT: [[TMP35:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69.region_id, i32 3, i8** [[TMP28]], i8** [[TMP29]], i64* [[TMP30]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) 918 // CHECK2-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 919 // CHECK2-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 920 // CHECK2: omp_offload.failed: 921 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69(i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]]) #[[ATTR2:[0-9]+]] 922 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 923 // CHECK2: omp_offload.cont: 924 // CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 925 // CHECK2-NEXT: store i32 [[TMP37]], i32* [[SIZE_CASTED4]], align 4 926 // CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[SIZE_CASTED4]], align 4 927 // CHECK2-NEXT: [[TMP39:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 928 // CHECK2-NEXT: [[TMP40:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 929 // CHECK2-NEXT: [[TMP41:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 930 // CHECK2-NEXT: [[TMP42:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 931 // CHECK2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP42]], i32 0 932 // CHECK2-NEXT: [[TMP43:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 933 // CHECK2-NEXT: [[TMP44:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 934 // CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i32 0 935 // CHECK2-NEXT: [[TMP45:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 936 // CHECK2-NEXT: [[TMP46:%.*]] = mul nuw i32 [[TMP45]], 4 937 // CHECK2-NEXT: [[TMP47:%.*]] = sext i32 [[TMP46]] to i64 938 // CHECK2-NEXT: [[TMP48:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES10]] to i8* 939 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP48]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes.7 to i8*), i32 24, i1 false) 940 // CHECK2-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 941 // CHECK2-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32* 942 // CHECK2-NEXT: store i32 [[TMP38]], i32* [[TMP50]], align 4 943 // CHECK2-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 944 // 
CHECK2-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32* 945 // CHECK2-NEXT: store i32 [[TMP38]], i32* [[TMP52]], align 4 946 // CHECK2-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0 947 // CHECK2-NEXT: store i8* null, i8** [[TMP53]], align 4 948 // CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1 949 // CHECK2-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32** 950 // CHECK2-NEXT: store i32* [[TMP41]], i32** [[TMP55]], align 4 951 // CHECK2-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1 952 // CHECK2-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to i32** 953 // CHECK2-NEXT: store i32* [[ARRAYIDX5]], i32** [[TMP57]], align 4 954 // CHECK2-NEXT: [[TMP58:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1 955 // CHECK2-NEXT: store i8* null, i8** [[TMP58]], align 4 956 // CHECK2-NEXT: [[TMP59:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 2 957 // CHECK2-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32** 958 // CHECK2-NEXT: store i32* [[TMP43]], i32** [[TMP60]], align 4 959 // CHECK2-NEXT: [[TMP61:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 2 960 // CHECK2-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32** 961 // CHECK2-NEXT: store i32* [[ARRAYIDX6]], i32** [[TMP62]], align 4 962 // CHECK2-NEXT: [[TMP63:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES10]], i32 0, i32 2 963 // CHECK2-NEXT: store i64 [[TMP47]], i64* [[TMP63]], align 4 964 // CHECK2-NEXT: [[TMP64:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 2 965 // CHECK2-NEXT: store i8* null, i8** [[TMP64]], align 4 966 // CHECK2-NEXT: [[TMP65:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 967 // CHECK2-NEXT: [[TMP66:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 968 // CHECK2-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES10]], i32 0, i32 0 969 // CHECK2-NEXT: [[TMP68:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 970 // CHECK2-NEXT: store i32 [[TMP68]], i32* [[DOTCAPTURE_EXPR_12]], align 4 971 // CHECK2-NEXT: [[TMP69:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4 972 // CHECK2-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP69]], 0 973 // CHECK2-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1 974 // CHECK2-NEXT: [[SUB16:%.*]] = sub nsw i32 [[DIV15]], 1 975 // CHECK2-NEXT: store i32 [[SUB16]], i32* [[DOTCAPTURE_EXPR_13]], align 4 976 // CHECK2-NEXT: [[TMP70:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_13]], align 4 977 // CHECK2-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP70]], 1 978 // CHECK2-NEXT: [[TMP71:%.*]] = zext i32 [[ADD17]] to i64 979 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i64 [[TMP71]]) 980 // CHECK2-NEXT: [[TMP72:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73.region_id, i32 3, i8** [[TMP65]], i8** [[TMP66]], i64* [[TMP67]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) 981 // CHECK2-NEXT: [[TMP73:%.*]] = icmp ne i32 [[TMP72]], 0 982 // CHECK2-NEXT: br i1 [[TMP73]], label [[OMP_OFFLOAD_FAILED18:%.*]], label 
[[OMP_OFFLOAD_CONT19:%.*]] 983 // CHECK2: omp_offload.failed18: 984 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73(i32 [[TMP38]], i32* [[TMP39]], i32* [[TMP40]]) #[[ATTR2]] 985 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT19]] 986 // CHECK2: omp_offload.cont19: 987 // CHECK2-NEXT: [[TMP74:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 988 // CHECK2-NEXT: store i32 [[TMP74]], i32* [[SIZE_CASTED20]], align 4 989 // CHECK2-NEXT: [[TMP75:%.*]] = load i32, i32* [[SIZE_CASTED20]], align 4 990 // CHECK2-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 0 991 // CHECK2-NEXT: [[TMP76:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 992 // CHECK2-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* 993 // CHECK2-NEXT: store i32 [[TMP75]], i32* [[TMP77]], align 4 994 // CHECK2-NEXT: [[TMP78:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 995 // CHECK2-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* 996 // CHECK2-NEXT: store i32 [[TMP75]], i32* [[TMP79]], align 4 997 // CHECK2-NEXT: [[TMP80:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i32 0, i32 0 998 // CHECK2-NEXT: store i8* null, i8** [[TMP80]], align 4 999 // CHECK2-NEXT: [[TMP81:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 1000 // CHECK2-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to [10 x i32]** 1001 // CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP82]], align 4 1002 // CHECK2-NEXT: [[TMP83:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 1003 // CHECK2-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** 1004 // CHECK2-NEXT: store i32* [[ARRAYIDX21]], i32** [[TMP84]], align 4 1005 // CHECK2-NEXT: [[TMP85:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i32 0, i32 1 1006 // CHECK2-NEXT: store i8* null, i8** [[TMP85]], align 4 1007 // CHECK2-NEXT: [[TMP86:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 1008 // CHECK2-NEXT: [[TMP87:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 1009 // CHECK2-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78.region_id, i32 2, i8** [[TMP86]], i8** [[TMP87]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 1010 // CHECK2-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 1011 // CHECK2-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] 1012 // CHECK2: omp_offload.failed25: 1013 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78(i32 [[TMP75]], [10 x i32]* [[A]]) #[[ATTR2]] 1014 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT26]] 1015 // CHECK2: omp_offload.cont26: 1016 // CHECK2-NEXT: [[TMP90:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1017 // CHECK2-NEXT: store i32 [[TMP90]], i32* [[SIZE_CASTED27]], align 4 1018 // CHECK2-NEXT: [[TMP91:%.*]] = load i32, i32* [[SIZE_CASTED27]], align 4 1019 // CHECK2-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 3 1020 // CHECK2-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* 
[[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 1021 // CHECK2-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to i32* 1022 // CHECK2-NEXT: store i32 [[TMP91]], i32* [[TMP93]], align 4 1023 // CHECK2-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 1024 // CHECK2-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i32* 1025 // CHECK2-NEXT: store i32 [[TMP91]], i32* [[TMP95]], align 4 1026 // CHECK2-NEXT: [[TMP96:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 0 1027 // CHECK2-NEXT: store i8* null, i8** [[TMP96]], align 4 1028 // CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 1029 // CHECK2-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to [10 x i32]** 1030 // CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP98]], align 4 1031 // CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 1032 // CHECK2-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32** 1033 // CHECK2-NEXT: store i32* [[ARRAYIDX28]], i32** [[TMP100]], align 4 1034 // CHECK2-NEXT: [[TMP101:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 1 1035 // CHECK2-NEXT: store i8* null, i8** [[TMP101]], align 4 1036 // CHECK2-NEXT: [[TMP102:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 1037 // CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 1038 // CHECK2-NEXT: [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81.region_id, i32 2, i8** [[TMP102]], i8** [[TMP103]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 1039 // CHECK2-NEXT: [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0 1040 // CHECK2-NEXT: br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] 1041 // CHECK2: omp_offload.failed32: 1042 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81(i32 [[TMP91]], [10 x i32]* [[A]]) #[[ATTR2]] 1043 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT33]] 1044 // CHECK2: omp_offload.cont33: 1045 // CHECK2-NEXT: ret void 1046 // 1047 // 1048 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69 1049 // CHECK2-SAME: (i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1:[0-9]+]] { 1050 // CHECK2-NEXT: entry: 1051 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1052 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1053 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1054 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1055 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1056 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1057 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1058 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1059 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4 1060 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1061 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1062 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, 
i32** [[INPUT_ADDR]], align 4 1063 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]]) 1064 // CHECK2-NEXT: ret void 1065 // 1066 // 1067 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined. 1068 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1069 // CHECK2-NEXT: entry: 1070 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1071 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1072 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1073 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1074 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1075 // CHECK2-NEXT: [[OUTPUT1:%.*]] = alloca i32, align 4 1076 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 4 1077 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1078 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 1079 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1080 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 1081 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1082 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 1083 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 1084 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1085 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1086 // CHECK2-NEXT: [[I5:%.*]] = alloca i32, align 4 1087 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1088 // CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 1089 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1090 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1091 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1092 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1093 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1094 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1095 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 0 1096 // CHECK2-NEXT: store i32 0, i32* [[OUTPUT1]], align 4 1097 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1098 // CHECK2-NEXT: [[TMP2:%.*]] = ptrtoint i32* [[TMP1]] to i64 1099 // CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 1100 // CHECK2-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]] 1101 // CHECK2-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 1102 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr i32, i32* [[OUTPUT1]], i64 [[TMP5]] 1103 // CHECK2-NEXT: store i32* [[TMP6]], i32** [[TMP]], align 4 1104 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1105 // CHECK2-NEXT: store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_]], align 4 1106 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1107 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP8]], 0 1108 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 1109 // CHECK2-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1 1110 // CHECK2-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4 1111 // 
CHECK2-NEXT: store i32 0, i32* [[I]], align 4 1112 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1113 // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] 1114 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1115 // CHECK2: omp.precond.then: 1116 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 1117 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1118 // CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_COMB_UB]], align 4 1119 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1120 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1121 // CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1122 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1123 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP12]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1124 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1125 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1126 // CHECK2-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]] 1127 // CHECK2-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1128 // CHECK2: cond.true: 1129 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1130 // CHECK2-NEXT: br label [[COND_END:%.*]] 1131 // CHECK2: cond.false: 1132 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1133 // CHECK2-NEXT: br label [[COND_END]] 1134 // CHECK2: cond.end: 1135 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ] 1136 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 1137 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1138 // CHECK2-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4 1139 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1140 // CHECK2: omp.inner.for.cond: 1141 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1142 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1143 // CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]] 1144 // CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1145 // CHECK2: omp.inner.for.body: 1146 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1147 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1148 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1149 // CHECK2-NEXT: store i32 [[TMP22]], i32* [[SIZE_CASTED]], align 4 1150 // CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1151 // CHECK2-NEXT: [[TMP24:%.*]] = load i32*, i32** [[TMP]], align 4 1152 // CHECK2-NEXT: [[TMP25:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1153 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP20]], i32 [[TMP21]], i32 [[TMP23]], i32* [[TMP24]], i32* [[TMP25]]) 1154 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1155 // CHECK2: omp.inner.for.inc: 1156 // CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1157 // CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 1158 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP26]], [[TMP27]] 1159 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 1160 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 1161 // CHECK2: omp.inner.for.end: 1162 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1163 // CHECK2: omp.loop.exit: 1164 // CHECK2-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1165 // CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4 1166 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP29]]) 1167 // CHECK2-NEXT: br label [[OMP_PRECOND_END]] 1168 // CHECK2: omp.precond.end: 1169 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 1170 // CHECK2-NEXT: [[TMP31:%.*]] = bitcast i32* [[OUTPUT1]] to i8* 1171 // CHECK2-NEXT: store i8* [[TMP31]], i8** [[TMP30]], align 4 1172 // CHECK2-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1173 // CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4 1174 // CHECK2-NEXT: [[TMP34:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1175 // CHECK2-NEXT: [[TMP35:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP33]], i32 1, i32 4, i8* [[TMP34]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var) 1176 // CHECK2-NEXT: switch i32 [[TMP35]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1177 // CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1178 // CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1179 // CHECK2-NEXT: ] 1180 // CHECK2: .omp.reduction.case1: 1181 // CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 1182 // CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[OUTPUT1]], align 4 1183 // CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]] 1184 // CHECK2-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4 1185 // CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP33]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1186 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1187 // CHECK2: .omp.reduction.case2: 1188 // CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[OUTPUT1]], align 4 1189 // CHECK2-NEXT: [[TMP39:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP38]] monotonic, align 4 1190 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1191 // CHECK2: .omp.reduction.default: 1192 // CHECK2-NEXT: ret void 1193 // 1194 // 1195 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1 1196 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1197 // CHECK2-NEXT: entry: 1198 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1199 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1200 // 
CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4 1201 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4 1202 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1203 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1204 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1205 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1206 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1207 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1208 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1209 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1210 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1211 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1212 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1213 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1214 // CHECK2-NEXT: [[OUTPUT3:%.*]] = alloca i32, align 4 1215 // CHECK2-NEXT: [[_TMP4:%.*]] = alloca i32*, align 4 1216 // CHECK2-NEXT: [[I5:%.*]] = alloca i32, align 4 1217 // CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 1218 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1219 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1220 // CHECK2-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4 1221 // CHECK2-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4 1222 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1223 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1224 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1225 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1226 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 1227 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1228 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0 1229 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 1230 // CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 1231 // CHECK2-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1232 // CHECK2-NEXT: store i32 0, i32* [[I]], align 4 1233 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1234 // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]] 1235 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1236 // CHECK2: omp.precond.then: 1237 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1238 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1239 // CHECK2-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4 1240 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4 1241 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4 1242 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_LB]], align 4 1243 // CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4 1244 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1245 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1246 // CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1247 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 0 1248 // CHECK2-NEXT: store i32 0, i32* [[OUTPUT3]], align 4 1249 // CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1250 // CHECK2-NEXT: [[TMP8:%.*]] = ptrtoint i32* [[TMP7]] to i64 1251 
// CHECK2-NEXT: [[TMP9:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 1252 // CHECK2-NEXT: [[TMP10:%.*]] = sub i64 [[TMP8]], [[TMP9]] 1253 // CHECK2-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 1254 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[OUTPUT3]], i64 [[TMP11]] 1255 // CHECK2-NEXT: store i32* [[TMP12]], i32** [[_TMP4]], align 4 1256 // CHECK2-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1257 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4 1258 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP14]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1259 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1260 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1261 // CHECK2-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP15]], [[TMP16]] 1262 // CHECK2-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1263 // CHECK2: cond.true: 1264 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1265 // CHECK2-NEXT: br label [[COND_END:%.*]] 1266 // CHECK2: cond.false: 1267 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1268 // CHECK2-NEXT: br label [[COND_END]] 1269 // CHECK2: cond.end: 1270 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP17]], [[COND_TRUE]] ], [ [[TMP18]], [[COND_FALSE]] ] 1271 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1272 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1273 // CHECK2-NEXT: store i32 [[TMP19]], i32* [[DOTOMP_IV]], align 4 1274 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1275 // CHECK2: omp.inner.for.cond: 1276 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1277 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1278 // CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] 1279 // CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1280 // CHECK2: omp.inner.for.body: 1281 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1282 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1 1283 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1284 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I5]], align 4 1285 // CHECK2-NEXT: [[TMP23:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1286 // CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[I5]], align 4 1287 // CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]] 1288 // CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4 1289 // CHECK2-NEXT: [[TMP26:%.*]] = load i32*, i32** [[_TMP4]], align 4 1290 // CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 0 1291 // CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4 1292 // CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP27]], [[TMP25]] 1293 // CHECK2-NEXT: store i32 [[ADD10]], i32* [[ARRAYIDX9]], align 4 1294 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1295 // CHECK2: omp.body.continue: 1296 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1297 // CHECK2: omp.inner.for.inc: 1298 // CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1299 // CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP28]], 1 1300 // CHECK2-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4 1301 // 
CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 1302 // CHECK2: omp.inner.for.end: 1303 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1304 // CHECK2: omp.loop.exit: 1305 // CHECK2-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1306 // CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 1307 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]]) 1308 // CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 1309 // CHECK2-NEXT: [[TMP32:%.*]] = bitcast i32* [[OUTPUT3]] to i8* 1310 // CHECK2-NEXT: store i8* [[TMP32]], i8** [[TMP31]], align 4 1311 // CHECK2-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1312 // CHECK2-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4 1313 // CHECK2-NEXT: [[TMP35:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1314 // CHECK2-NEXT: [[TMP36:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP34]], i32 1, i32 4, i8* [[TMP35]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) 1315 // CHECK2-NEXT: switch i32 [[TMP36]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1316 // CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1317 // CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1318 // CHECK2-NEXT: ] 1319 // CHECK2: .omp.reduction.case1: 1320 // CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 1321 // CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[OUTPUT3]], align 4 1322 // CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP37]], [[TMP38]] 1323 // CHECK2-NEXT: store i32 [[ADD12]], i32* [[ARRAYIDX]], align 4 1324 // CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP34]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1325 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1326 // CHECK2: .omp.reduction.case2: 1327 // CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[OUTPUT3]], align 4 1328 // CHECK2-NEXT: [[TMP40:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP39]] monotonic, align 4 1329 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1330 // CHECK2: .omp.reduction.default: 1331 // CHECK2-NEXT: br label [[OMP_PRECOND_END]] 1332 // CHECK2: omp.precond.end: 1333 // CHECK2-NEXT: ret void 1334 // 1335 // 1336 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func 1337 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] { 1338 // CHECK2-NEXT: entry: 1339 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 1340 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 1341 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 1342 // CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 1343 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 1344 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1345 // CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 1346 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1347 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 1348 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 1349 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 1350 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 1351 // CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 1352 // CHECK2-NEXT: 
[[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 1353 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1354 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4 1355 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 1356 // CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 1357 // CHECK2-NEXT: ret void 1358 // 1359 // 1360 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2 1361 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 1362 // CHECK2-NEXT: entry: 1363 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 1364 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 1365 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 1366 // CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 1367 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 1368 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1369 // CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 1370 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1371 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 1372 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 1373 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 1374 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 1375 // CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 1376 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 1377 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1378 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4 1379 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 1380 // CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 1381 // CHECK2-NEXT: ret void 1382 // 1383 // 1384 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73 1385 // CHECK2-SAME: (i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1386 // CHECK2-NEXT: entry: 1387 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1388 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1389 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1390 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1391 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1392 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1393 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1394 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1395 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4 1396 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1397 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1398 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1399 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]]) 1400 // CHECK2-NEXT: ret void 1401 // 1402 // 1403 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3 1404 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1405 // CHECK2-NEXT: entry: 1406 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1407 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1408 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1409 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1410 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1411 // CHECK2-NEXT: [[OUTPUT2:%.*]] = alloca [3 x i32], align 4 1412 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 4 1413 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1414 // CHECK2-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 1415 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1416 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4 1417 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1418 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 1419 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 1420 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1421 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1422 // CHECK2-NEXT: [[I6:%.*]] = alloca i32, align 4 1423 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1424 // CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 1425 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1426 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1427 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1428 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1429 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1430 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1431 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 0 1432 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1433 // CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 2 1434 // CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[OUTPUT2]], i32 0, i32 0 1435 // CHECK2-NEXT: [[TMP2:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 3 1436 // CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP2]] 1437 // CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 1438 // CHECK2: omp.arrayinit.body: 1439 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 1440 // CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 1441 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 1442 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP2]] 1443 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 1444 // CHECK2: 
omp.arrayinit.done: 1445 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1446 // CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64 1447 // CHECK2-NEXT: [[TMP5:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 1448 // CHECK2-NEXT: [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]] 1449 // CHECK2-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 1450 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast [3 x i32]* [[OUTPUT2]] to i32* 1451 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr i32, i32* [[TMP8]], i64 [[TMP7]] 1452 // CHECK2-NEXT: store i32* [[TMP9]], i32** [[TMP]], align 4 1453 // CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [3 x i32]* [[OUTPUT2]] to i32* 1454 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1455 // CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR_]], align 4 1456 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1457 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], 0 1458 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 1459 // CHECK2-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1 1460 // CHECK2-NEXT: store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_4]], align 4 1461 // CHECK2-NEXT: store i32 0, i32* [[I]], align 4 1462 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1463 // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP12]] 1464 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1465 // CHECK2: omp.precond.then: 1466 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 1467 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 1468 // CHECK2-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_COMB_UB]], align 4 1469 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1470 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1471 // CHECK2-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1472 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4 1473 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP15]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1474 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1475 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 1476 // CHECK2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP16]], [[TMP17]] 1477 // CHECK2-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1478 // CHECK2: cond.true: 1479 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 1480 // CHECK2-NEXT: br label [[COND_END:%.*]] 1481 // CHECK2: cond.false: 1482 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1483 // CHECK2-NEXT: br label [[COND_END]] 1484 // CHECK2: cond.end: 1485 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP18]], [[COND_TRUE]] ], [ [[TMP19]], [[COND_FALSE]] ] 1486 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 1487 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1488 // CHECK2-NEXT: store i32 [[TMP20]], i32* [[DOTOMP_IV]], align 4 1489 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1490 // CHECK2: omp.inner.for.cond: 1491 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1492 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1493 // CHECK2-NEXT: [[CMP8:%.*]] = icmp sle i32 
[[TMP21]], [[TMP22]] 1494 // CHECK2-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1495 // CHECK2: omp.inner.for.body: 1496 // CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1497 // CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1498 // CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1499 // CHECK2-NEXT: store i32 [[TMP25]], i32* [[SIZE_CASTED]], align 4 1500 // CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1501 // CHECK2-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP]], align 4 1502 // CHECK2-NEXT: [[TMP28:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1503 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP23]], i32 [[TMP24]], i32 [[TMP26]], i32* [[TMP27]], i32* [[TMP28]]) 1504 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1505 // CHECK2: omp.inner.for.inc: 1506 // CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1507 // CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 1508 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], [[TMP30]] 1509 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 1510 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 1511 // CHECK2: omp.inner.for.end: 1512 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1513 // CHECK2: omp.loop.exit: 1514 // CHECK2-NEXT: [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1515 // CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4 1516 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP32]]) 1517 // CHECK2-NEXT: br label [[OMP_PRECOND_END]] 1518 // CHECK2: omp.precond.end: 1519 // CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 1520 // CHECK2-NEXT: [[TMP34:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 1521 // CHECK2-NEXT: store i8* [[TMP34]], i8** [[TMP33]], align 4 1522 // CHECK2-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1523 // CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4 1524 // CHECK2-NEXT: [[TMP37:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1525 // CHECK2-NEXT: [[TMP38:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], i32 1, i32 4, i8* [[TMP37]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var) 1526 // CHECK2-NEXT: switch i32 [[TMP38]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1527 // CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1528 // CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1529 // CHECK2-NEXT: ] 1530 // CHECK2: .omp.reduction.case1: 1531 // CHECK2-NEXT: [[TMP39:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3 1532 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP39]] 1533 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE13:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 1534 // CHECK2: omp.arraycpy.body: 1535 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1536 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST9:%.*]] = phi i32* [ [[ARRAYIDX]], 
[[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1537 // CHECK2-NEXT: [[TMP40:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], align 4 1538 // CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 1539 // CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP40]], [[TMP41]] 1540 // CHECK2-NEXT: store i32 [[ADD10]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], align 4 1541 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], i32 1 1542 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 1543 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE12:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP39]] 1544 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE12]], label [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_BODY]] 1545 // CHECK2: omp.arraycpy.done13: 1546 // CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1547 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1548 // CHECK2: .omp.reduction.case2: 1549 // CHECK2-NEXT: [[TMP42:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3 1550 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY14:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP42]] 1551 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY14]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY15:%.*]] 1552 // CHECK2: omp.arraycpy.body15: 1553 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST16:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT19:%.*]], [[OMP_ARRAYCPY_BODY15]] ] 1554 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST17:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT18:%.*]], [[OMP_ARRAYCPY_BODY15]] ] 1555 // CHECK2-NEXT: [[TMP43:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST16]], align 4 1556 // CHECK2-NEXT: [[TMP44:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST17]], i32 [[TMP43]] monotonic, align 4 1557 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT18]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST17]], i32 1 1558 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT19]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST16]], i32 1 1559 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT18]], [[TMP42]] 1560 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY15]] 1561 // CHECK2: omp.arraycpy.done21: 1562 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1563 // CHECK2: .omp.reduction.default: 1564 // CHECK2-NEXT: ret void 1565 // 1566 // 1567 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4 1568 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1569 // CHECK2-NEXT: entry: 1570 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1571 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1572 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4 1573 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4 1574 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1575 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1576 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = 
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OUTPUT4:%.*]] = alloca [3 x i32], align 4
// CHECK2-NEXT: [[_TMP5:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[I6:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK2-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK2-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[I]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]]
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK2: omp.precond.then:
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 2
// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[OUTPUT4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP8]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK2: omp.arrayinit.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[OMP_PRECOND_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK2: omp.arrayinit.done:
// CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = ptrtoint i32* [[TMP9]] to i64
// CHECK2-NEXT: [[TMP11:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64
// CHECK2-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]]
// CHECK2-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast [3 x i32]* [[OUTPUT4]] to i32*
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr i32, i32* [[TMP14]], i64 [[TMP13]]
// CHECK2-NEXT: store i32* [[TMP15]], i32** [[_TMP5]], align 4
// CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [3 x i32]* [[OUTPUT4]] to i32*
// CHECK2-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP17]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP18]], [[TMP19]]
// CHECK2-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP20]], [[COND_TRUE]] ], [ [[TMP21]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP22]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
// CHECK2-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP25]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I6]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[I6]], align 4
// CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4
// CHECK2-NEXT: [[TMP29:%.*]] = load i32*, i32** [[_TMP5]], align 4
// CHECK2-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 0
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX10]], align 4
// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP30]], [[TMP28]]
// CHECK2-NEXT: store i32 [[ADD11]], i32* [[ARRAYIDX10]], align 4
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP31]], 1
// CHECK2-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK2-NEXT: store i8* [[TMP35]], i8** [[TMP34]], align 4
// CHECK2-NEXT: [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
// CHECK2-NEXT: [[TMP38:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP39:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP37]], i32 1, i32 4, i8* [[TMP38]], void (i8*, i8*)* @.omp.reduction.reduction_func.5, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP39]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP40]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE17:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST13:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT15:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], align 4
// CHECK2-NEXT: [[TMP42:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP41]], [[TMP42]]
// CHECK2-NEXT: store i32 [[ADD14]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT15]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE16:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT15]], [[TMP40]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE16]], label [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done17:
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP37]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP43:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY18:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP43]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY18]], label [[OMP_ARRAYCPY_DONE25:%.*]], label [[OMP_ARRAYCPY_BODY19:%.*]]
// CHECK2: omp.arraycpy.body19:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST20:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT23:%.*]], [[OMP_ARRAYCPY_BODY19]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST21:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT22:%.*]], [[OMP_ARRAYCPY_BODY19]] ]
// CHECK2-NEXT: [[TMP44:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], align 4
// CHECK2-NEXT: [[TMP45:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 [[TMP44]] monotonic, align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT22]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT23]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE24:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT22]], [[TMP43]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE24]], label [[OMP_ARRAYCPY_DONE25]], label [[OMP_ARRAYCPY_BODY19]]
// CHECK2: omp.arraycpy.done25:
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
// CHECK2: omp.precond.end:
// CHECK2-NEXT: ret void
//
//
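// The two combiner helpers that follow are structurally identical: each walks
// the 3-element section and performs an element-wise 'add nsw' from the RHS
// private copy into the LHS. reduction_func.5 is the combiner passed to
// __kmpc_reduce_nowait in .omp_outlined..4 above; reduction_func.6 is
// presumably the counterpart for the enclosing teams-level combine (its
// caller is emitted elsewhere).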
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.5
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done2:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done2:
// CHECK2-NEXT: ret void
//
//
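// Next are the offload entry point and outlined body for the
// 'target parallel reduction(+: a[:2])' region (the _l78 suffix encodes the
// source line): the private copy is a [2 x i32] (A2), zero-initialized and
// rebased against &a[0], and the two-element section is combined serially in
// case 1 or via atomicrmw add in case 2, with reduction_func.10 as the
// combiner callback.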
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78
// CHECK2-SAME: (i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR6:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP2]], [10 x i32]* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: [[A2:%.*]] = alloca [2 x i32], align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 1
// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[A2]], i32 0, i32 0
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 2
// CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP1]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK2: omp.arrayinit.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP1]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK2: omp.arrayinit.done:
// CHECK2-NEXT: [[TMP2:%.*]] = bitcast [10 x i32]* [[TMP0]] to i32*
// CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[TMP2]] to i64
// CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64
// CHECK2-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]]
// CHECK2-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast [2 x i32]* [[A2]] to i32*
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP7]], i64 [[TMP6]]
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast i32* [[TMP8]] to [10 x i32]*
// CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [2 x i32]* [[A2]] to i32*
// CHECK2-NEXT: store i32 0, i32* [[I]], align 4
// CHECK2-NEXT: br label [[FOR_COND:%.*]]
// CHECK2: for.cond:
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP10]], [[TMP11]]
// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK2: for.body:
// CHECK2-NEXT: br label [[FOR_INC:%.*]]
// CHECK2: for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[I]], align 4
// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK2-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32 1, i32 4, i8* [[TMP17]], void (i8*, i8*)* @.omp.reduction.reduction_func.10, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 2
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP19]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP19]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done6:
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 2
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP22]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
// CHECK2: omp.arraycpy.body8:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP23]] monotonic, align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP22]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
// CHECK2: omp.arraycpy.done14:
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.10
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 2
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done2:
// CHECK2-NEXT: ret void
//
//
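// The same pattern follows for 'reduction(+: a[3])' (the _l81 entry point).
// Since the list item is a single array element, the private copy is one i32
// (A1) and no array-copy loop is needed: case 1 is a plain 'add nsw' under
// the reduce/end_reduce protocol, case 2 a single atomicrmw add. Roughly:
//   a[3] += a3_private;  // a3_private: illustrative name for the copy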
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81
// CHECK2-SAME: (i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR6]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], [10 x i32]* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 3
// CHECK2-NEXT: store i32 0, i32* [[A1]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = bitcast [10 x i32]* [[TMP0]] to i32*
// CHECK2-NEXT: [[TMP2:%.*]] = ptrtoint i32* [[TMP1]] to i64
// CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64
// CHECK2-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]]
// CHECK2-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr i32, i32* [[A1]], i64 [[TMP5]]
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to [10 x i32]*
// CHECK2-NEXT: store i32 0, i32* [[I]], align 4
// CHECK2-NEXT: br label [[FOR_COND:%.*]]
// CHECK2: for.cond:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK2: for.body:
// CHECK2-NEXT: br label [[FOR_INC:%.*]]
// CHECK2: for.inc:
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[I]], align 4
// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK2-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 1, i32 4, i8* [[TMP15]], void (i8*, i8*)* @.omp.reduction.reduction_func.14, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP16]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[A1]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[A1]], align 4
// CHECK2-NEXT: [[TMP20:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP19]] monotonic, align 4
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.14
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK2-NEXT: ret void
//
//
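// Host-side main() for this run line: 'new int[100]' appears as the 400-byte
// _Znaj call (4-byte int, 32-bit operator new[]) before _Z3sumPiiS_ is
// invoked with the array, size 100, and &result.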
// CHECK2-LABEL: define {{[^@]+}}@main
// CHECK2-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SIZE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[ARRAY:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[RESULT:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK2-NEXT: store i32 100, i32* [[SIZE]], align 4
// CHECK2-NEXT: [[CALL:%.*]] = call noalias noundef nonnull i8* @_Znaj(i32 noundef 400) #[[ATTR10:[0-9]+]]
// CHECK2-NEXT: [[TMP0:%.*]] = bitcast i8* [[CALL]] to i32*
// CHECK2-NEXT: store i32* [[TMP0]], i32** [[ARRAY]], align 4
// CHECK2-NEXT: store i32 0, i32* [[RESULT]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[ARRAY]], align 4
// CHECK2-NEXT: call void @_Z3sumPiiS_(i32* noundef [[TMP1]], i32 noundef 100, i32* noundef [[RESULT]])
// CHECK2-NEXT: ret i32 0
//
//
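// Finally, the module registers its OpenMP offloading requirements with the
// runtime through __tgt_register_requires.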
// CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK2-SAME: () #[[ATTR9:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK2-NEXT: ret void
//