// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-cuda-mode -x c++ \
// RUN: -triple powerpc64le-unknown-unknown -DCUDA \
// RUN: -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o \
// RUN: %t-ppc-host.bc

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-cuda-mode -x c++ \
// RUN: -triple nvptx64-unknown-unknown \
// RUN: -fopenmp-targets=nvptx64-nvidia-cuda -DCUDA -emit-llvm %s \
// RUN: -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc \
// RUN: -o - | FileCheck %s --check-prefix CHECK

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ \
// RUN: -triple powerpc64le-unknown-unknown -DDIAG \
// RUN: -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm \
// RUN: %s -o - | FileCheck %s \
// RUN: --check-prefix=CHECK1

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ \
// RUN: -triple i386-unknown-unknown \
// RUN: -fopenmp-targets=i386-pc-linux-gnu -emit-llvm \
// RUN: %s -o - | FileCheck %s \
// RUN: --check-prefix=CHECK2


#if defined(CUDA)
// expected-no-diagnostics

int foo(int n) {
  double *e;
  //no error and no implicit map generated for e[:1]
  #pragma omp target parallel reduction(+: e[:1])
  *e=10;
  ;
  return 0;
}
// CHECK-NOT: @.offload_maptypes
#elif defined(DIAG)
class S2 {
  mutable int a;
public:
  S2():a(0) { }
  S2(S2 &s2):a(s2.a) { }
  S2 &operator +(S2 &s);
};
int bar() {
  S2 o[5];
  //warning "copyable and not guaranteed to be mapped correctly" and
  //implicit map generated.
  #pragma omp target parallel reduction(+:o[0]) //expected-warning {{Type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}}
  for (int i = 0; i < 10; i++);
  double b[10][10][10];
  //no error no implicit map generated, the map for b is generated but not
  //for b[0:2][2:4][1].
  #pragma omp target parallel for reduction(task, +: b[0:2][2:4][1])
  for (long long i = 0; i < 10; ++i);
  return 0;
}
// map for variable o
// map for b:
#else
// expected-no-diagnostics

// generate implicit map for array elements or array sections in reduction
// clause. In following case: the implicit map is generated for output[0]
// with map size 4 and output[:3] with map size 12.
void sum(int* input, int size, int* output)
{
#pragma omp target teams distribute parallel for reduction(+: output[0]) \
  map(to: input [0:size])
  for (int i = 0; i < size; i++)
    output[0] += input[i];
#pragma omp target teams distribute parallel for reduction(+: output[:3]) \
  map(to: input [0:size])
  for (int i = 0; i < size; i++)
    output[0] += input[i];
  int a[10];
#pragma omp target parallel reduction(+: a[:2])
  for (int i = 0; i < size; i++)
    ;
#pragma omp target parallel reduction(+: a[3])
  for (int i = 0; i < size; i++)
    ;
}
#endif
int main()
{
#if defined(CUDA)
  int a = foo(10);
#elif defined(DIAG)
  int a = bar();
#else
  const int size = 100;
  int *array = new int[size];
  int result = 0;
  sum(array, size, &result);
#endif
  return 0;
}
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l32
// CHECK-SAME: (double* noundef [[E:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[E_ADDR:%.*]] = alloca double*, align 8
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK-NEXT: store double* [[E]], double** [[E_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call i32 
@__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 2, i1 false, i1 true) 107 // CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 108 // CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] 109 // CHECK: user_code.entry: 110 // CHECK-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 111 // CHECK-NEXT: [[TMP2:%.*]] = load double*, double** [[E_ADDR]], align 8 112 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 113 // CHECK-NEXT: [[TMP4:%.*]] = bitcast double* [[TMP2]] to i8* 114 // CHECK-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8 115 // CHECK-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** 116 // CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, double*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP5]], i64 1) 117 // CHECK-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true) 118 // CHECK-NEXT: ret void 119 // CHECK: worker.exit: 120 // CHECK-NEXT: ret void 121 // 122 // 123 // CHECK-LABEL: define {{[^@]+}}@__omp_outlined__ 124 // CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], double* noundef [[E:%.*]]) #[[ATTR1:[0-9]+]] { 125 // CHECK-NEXT: entry: 126 // CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 127 // CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 128 // CHECK-NEXT: [[E_ADDR:%.*]] = alloca double*, align 8 129 // CHECK-NEXT: [[E2:%.*]] = alloca double, align 8 130 // CHECK-NEXT: [[TMP:%.*]] = alloca double*, align 8 131 // CHECK-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 132 // CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 133 // CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], 
align 8 134 // CHECK-NEXT: store double* [[E]], double** [[E_ADDR]], align 8 135 // CHECK-NEXT: [[TMP0:%.*]] = load double*, double** [[E_ADDR]], align 8 136 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP0]], i64 0 137 // CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[E_ADDR]], align 8 138 // CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[TMP1]], i64 0 139 // CHECK-NEXT: store double 0.000000e+00, double* [[E2]], align 8 140 // CHECK-NEXT: [[TMP2:%.*]] = load double*, double** [[E_ADDR]], align 8 141 // CHECK-NEXT: [[TMP3:%.*]] = ptrtoint double* [[TMP2]] to i64 142 // CHECK-NEXT: [[TMP4:%.*]] = ptrtoint double* [[ARRAYIDX]] to i64 143 // CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]] 144 // CHECK-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64) 145 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr double, double* [[E2]], i64 [[TMP6]] 146 // CHECK-NEXT: store double* [[TMP7]], double** [[TMP]], align 8 147 // CHECK-NEXT: [[TMP8:%.*]] = load double*, double** [[TMP]], align 8 148 // CHECK-NEXT: store double 1.000000e+01, double* [[TMP8]], align 8 149 // CHECK-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 150 // CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 151 // CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 152 // CHECK-NEXT: [[TMP12:%.*]] = bitcast double* [[E2]] to i8* 153 // CHECK-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8 154 // CHECK-NEXT: [[TMP13:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 155 // CHECK-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(%struct.ident_t* @[[GLOB2]], i32 [[TMP10]], i32 1, i64 8, i8* [[TMP13]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func) 156 // CHECK-NEXT: 
[[TMP15:%.*]] = icmp eq i32 [[TMP14]], 1 157 // CHECK-NEXT: br i1 [[TMP15]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]] 158 // CHECK: .omp.reduction.then: 159 // CHECK-NEXT: [[TMP16:%.*]] = load double, double* [[ARRAYIDX]], align 8 160 // CHECK-NEXT: [[TMP17:%.*]] = load double, double* [[E2]], align 8 161 // CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP16]], [[TMP17]] 162 // CHECK-NEXT: store double [[ADD]], double* [[ARRAYIDX]], align 8 163 // CHECK-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP10]]) 164 // CHECK-NEXT: br label [[DOTOMP_REDUCTION_DONE]] 165 // CHECK: .omp.reduction.done: 166 // CHECK-NEXT: ret void 167 // 168 // 169 // CHECK-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func 170 // CHECK-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] { 171 // CHECK-NEXT: entry: 172 // CHECK-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 173 // CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2 174 // CHECK-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2 175 // CHECK-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2 176 // CHECK-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x i8*], align 8 177 // CHECK-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8 178 // CHECK-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 179 // CHECK-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2 180 // CHECK-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2 181 // CHECK-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2 182 // CHECK-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 8 183 // CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 184 // CHECK-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2 185 // CHECK-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2 186 // CHECK-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2 187 // CHECK-NEXT: [[TMP9:%.*]] = 
getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 188 // CHECK-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to double** 189 // CHECK-NEXT: [[TMP11:%.*]] = load double*, double** [[TMP10]], align 8 190 // CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 191 // CHECK-NEXT: [[TMP13:%.*]] = getelementptr double, double* [[TMP11]], i64 1 192 // CHECK-NEXT: [[TMP14:%.*]] = bitcast double* [[TMP13]] to i8* 193 // CHECK-NEXT: [[TMP15:%.*]] = bitcast double* [[TMP11]] to i64* 194 // CHECK-NEXT: [[TMP16:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i64* 195 // CHECK-NEXT: [[TMP17:%.*]] = load i64, i64* [[TMP15]], align 8 196 // CHECK-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_get_warp_size() 197 // CHECK-NEXT: [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16 198 // CHECK-NEXT: [[TMP20:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP17]], i16 [[TMP7]], i16 [[TMP19]]) 199 // CHECK-NEXT: store i64 [[TMP20]], i64* [[TMP16]], align 8 200 // CHECK-NEXT: [[TMP21:%.*]] = getelementptr i64, i64* [[TMP15]], i64 1 201 // CHECK-NEXT: [[TMP22:%.*]] = getelementptr i64, i64* [[TMP16]], i64 1 202 // CHECK-NEXT: [[TMP23:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i8* 203 // CHECK-NEXT: store i8* [[TMP23]], i8** [[TMP12]], align 8 204 // CHECK-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP8]], 0 205 // CHECK-NEXT: [[TMP25:%.*]] = icmp eq i16 [[TMP8]], 1 206 // CHECK-NEXT: [[TMP26:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]] 207 // CHECK-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]] 208 // CHECK-NEXT: [[TMP28:%.*]] = icmp eq i16 [[TMP8]], 2 209 // CHECK-NEXT: [[TMP29:%.*]] = and i16 [[TMP6]], 1 210 // CHECK-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP29]], 0 211 // CHECK-NEXT: [[TMP31:%.*]] = and i1 [[TMP28]], [[TMP30]] 212 // CHECK-NEXT: [[TMP32:%.*]] = icmp sgt i16 [[TMP7]], 0 213 // CHECK-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]] 214 // CHECK-NEXT: [[TMP34:%.*]] = or i1 [[TMP24]], 
[[TMP27]] 215 // CHECK-NEXT: [[TMP35:%.*]] = or i1 [[TMP34]], [[TMP33]] 216 // CHECK-NEXT: br i1 [[TMP35]], label [[THEN:%.*]], label [[ELSE:%.*]] 217 // CHECK: then: 218 // CHECK-NEXT: [[TMP36:%.*]] = bitcast [1 x i8*]* [[TMP5]] to i8* 219 // CHECK-NEXT: [[TMP37:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8* 220 // CHECK-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP36]], i8* [[TMP37]]) #[[ATTR3:[0-9]+]] 221 // CHECK-NEXT: br label [[IFCONT:%.*]] 222 // CHECK: else: 223 // CHECK-NEXT: br label [[IFCONT]] 224 // CHECK: ifcont: 225 // CHECK-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1 226 // CHECK-NEXT: [[TMP39:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]] 227 // CHECK-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]] 228 // CHECK-NEXT: br i1 [[TMP40]], label [[THEN4:%.*]], label [[ELSE5:%.*]] 229 // CHECK: then4: 230 // CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 231 // CHECK-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to double** 232 // CHECK-NEXT: [[TMP43:%.*]] = load double*, double** [[TMP42]], align 8 233 // CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 234 // CHECK-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to double** 235 // CHECK-NEXT: [[TMP46:%.*]] = load double*, double** [[TMP45]], align 8 236 // CHECK-NEXT: [[TMP47:%.*]] = load double, double* [[TMP43]], align 8 237 // CHECK-NEXT: store double [[TMP47]], double* [[TMP46]], align 8 238 // CHECK-NEXT: br label [[IFCONT6:%.*]] 239 // CHECK: else5: 240 // CHECK-NEXT: br label [[IFCONT6]] 241 // CHECK: ifcont6: 242 // CHECK-NEXT: ret void 243 // 244 // 245 // CHECK-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func 246 // CHECK-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] { 247 // CHECK-NEXT: entry: 248 // CHECK-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 249 // CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32, 
align 4 250 // CHECK-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4 251 // CHECK-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]]) 252 // CHECK-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 253 // CHECK-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4 254 // CHECK-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block() 255 // CHECK-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block() 256 // CHECK-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31 257 // CHECK-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block() 258 // CHECK-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5 259 // CHECK-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 8 260 // CHECK-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [1 x i8*]* 261 // CHECK-NEXT: store i32 0, i32* [[DOTCNT_ADDR]], align 4 262 // CHECK-NEXT: br label [[PRECOND:%.*]] 263 // CHECK: precond: 264 // CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCNT_ADDR]], align 4 265 // CHECK-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 2 266 // CHECK-NEXT: br i1 [[TMP9]], label [[BODY:%.*]], label [[EXIT:%.*]] 267 // CHECK: body: 268 // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]]) 269 // CHECK-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0 270 // CHECK-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]] 271 // CHECK: then: 272 // CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i64 0, i64 0 273 // CHECK-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP10]], align 8 274 // CHECK-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32* 275 // CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP12]], i32 [[TMP8]] 276 // CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]] 277 // CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]], 
align 4 278 // CHECK-NEXT: store volatile i32 [[TMP15]], i32 addrspace(3)* [[TMP14]], align 4 279 // CHECK-NEXT: br label [[IFCONT:%.*]] 280 // CHECK: else: 281 // CHECK-NEXT: br label [[IFCONT]] 282 // CHECK: ifcont: 283 // CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]) 284 // CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTADDR1]], align 4 285 // CHECK-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP16]] 286 // CHECK-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]] 287 // CHECK: then2: 288 // CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]] 289 // CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i64 0, i64 0 290 // CHECK-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8 291 // CHECK-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32* 292 // CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP20]], i32 [[TMP8]] 293 // CHECK-NEXT: [[TMP22:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP17]], align 4 294 // CHECK-NEXT: store i32 [[TMP22]], i32* [[TMP21]], align 4 295 // CHECK-NEXT: br label [[IFCONT4:%.*]] 296 // CHECK: else3: 297 // CHECK-NEXT: br label [[IFCONT4]] 298 // CHECK: ifcont4: 299 // CHECK-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP8]], 1 300 // CHECK-NEXT: store i32 [[TMP23]], i32* [[DOTCNT_ADDR]], align 4 301 // CHECK-NEXT: br label [[PRECOND]] 302 // CHECK: exit: 303 // CHECK-NEXT: ret void 304 // 305 // 306 // CHECK1-LABEL: define {{[^@]+}}@_Z3barv 307 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] { 308 // CHECK1-NEXT: entry: 309 // CHECK1-NEXT: [[O:%.*]] = alloca [5 x %class.S2], align 4 310 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 311 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 312 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 313 // CHECK1-NEXT: [[B:%.*]] = 
alloca [10 x [10 x [10 x double]]], align 8 314 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8 315 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8 316 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8 317 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[O]], i32 0, i32 0 318 // CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[ARRAY_BEGIN]], i64 5 319 // CHECK1-NEXT: br label [[ARRAYCTOR_LOOP:%.*]] 320 // CHECK1: arrayctor.loop: 321 // CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %class.S2* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ] 322 // CHECK1-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]]) 323 // CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S2]], %class.S2* [[ARRAYCTOR_CUR]], i64 1 324 // CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S2* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] 325 // CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] 326 // CHECK1: arrayctor.cont: 327 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[O]], i64 0, i64 0 328 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 329 // CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [5 x %class.S2]** 330 // CHECK1-NEXT: store [5 x %class.S2]* [[O]], [5 x %class.S2]** [[TMP1]], align 8 331 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 332 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to %class.S2** 333 // CHECK1-NEXT: store %class.S2* [[ARRAYIDX]], %class.S2** [[TMP3]], align 8 334 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 335 // CHECK1-NEXT: 
store i8* null, i8** [[TMP4]], align 8 336 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 337 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 338 // CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 339 // CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 340 // CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 341 // CHECK1: omp_offload.failed: 342 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50([5 x %class.S2]* [[O]]) #[[ATTR8:[0-9]+]] 343 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 344 // CHECK1: omp_offload.cont: 345 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 346 // CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x [10 x [10 x double]]]** 347 // CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[TMP10]], align 8 348 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 349 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x [10 x [10 x double]]]** 350 // CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[TMP12]], align 8 351 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0 352 // CHECK1-NEXT: store i8* null, i8** [[TMP13]], align 8 353 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x 
i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 354 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 355 // CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.3, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 356 // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 357 // CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]] 358 // CHECK1: omp_offload.failed4: 359 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55([10 x [10 x [10 x double]]]* [[B]]) #[[ATTR8]] 360 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT5]] 361 // CHECK1: omp_offload.cont5: 362 // CHECK1-NEXT: ret i32 0 363 // 364 // 365 // CHECK1-LABEL: define {{[^@]+}}@_ZN2S2C1Ev 366 // CHECK1-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { 367 // CHECK1-NEXT: entry: 368 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8 369 // CHECK1-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8 370 // CHECK1-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8 371 // CHECK1-NEXT: call void @_ZN2S2C2Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) 372 // CHECK1-NEXT: ret void 373 // 374 // 375 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50 376 // CHECK1-SAME: ([5 x %class.S2]* noundef nonnull align 4 dereferenceable(20) [[O:%.*]]) #[[ATTR2:[0-9]+]] { 377 // CHECK1-NEXT: entry: 378 // CHECK1-NEXT: [[O_ADDR:%.*]] = alloca [5 x %class.S2]*, align 8 379 // CHECK1-NEXT: store [5 x %class.S2]* [[O]], 
[5 x %class.S2]** [[O_ADDR]], align 8 380 // CHECK1-NEXT: [[TMP0:%.*]] = load [5 x %class.S2]*, [5 x %class.S2]** [[O_ADDR]], align 8 381 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [5 x %class.S2]*)* @.omp_outlined. to void (i32*, i32*, ...)*), [5 x %class.S2]* [[TMP0]]) 382 // CHECK1-NEXT: ret void 383 // 384 // 385 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 386 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [5 x %class.S2]* noundef nonnull align 4 dereferenceable(20) [[O:%.*]]) #[[ATTR3:[0-9]+]] { 387 // CHECK1-NEXT: entry: 388 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 389 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 390 // CHECK1-NEXT: [[O_ADDR:%.*]] = alloca [5 x %class.S2]*, align 8 391 // CHECK1-NEXT: [[O1:%.*]] = alloca [[CLASS_S2:%.*]], align 4 392 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 393 // CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 394 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 395 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 396 // CHECK1-NEXT: store [5 x %class.S2]* [[O]], [5 x %class.S2]** [[O_ADDR]], align 8 397 // CHECK1-NEXT: [[TMP0:%.*]] = load [5 x %class.S2]*, [5 x %class.S2]** [[O_ADDR]], align 8 398 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[TMP0]], i64 0, i64 0 399 // CHECK1-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]]) 400 // CHECK1-NEXT: [[TMP1:%.*]] = bitcast [5 x %class.S2]* [[TMP0]] to %class.S2* 401 // CHECK1-NEXT: [[TMP2:%.*]] = ptrtoint %class.S2* [[TMP1]] to i64 402 // CHECK1-NEXT: [[TMP3:%.*]] = ptrtoint %class.S2* [[ARRAYIDX]] to i64 403 // CHECK1-NEXT: [[TMP4:%.*]] = 
sub i64 [[TMP2]], [[TMP3]] 404 // CHECK1-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (%class.S2* getelementptr ([[CLASS_S2]], %class.S2* null, i32 1) to i64) 405 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr [[CLASS_S2]], %class.S2* [[O1]], i64 [[TMP5]] 406 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast %class.S2* [[TMP6]] to [5 x %class.S2]* 407 // CHECK1-NEXT: store i32 0, i32* [[I]], align 4 408 // CHECK1-NEXT: br label [[FOR_COND:%.*]] 409 // CHECK1: for.cond: 410 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4 411 // CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], 10 412 // CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] 413 // CHECK1: for.body: 414 // CHECK1-NEXT: br label [[FOR_INC:%.*]] 415 // CHECK1: for.inc: 416 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4 417 // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1 418 // CHECK1-NEXT: store i32 [[INC]], i32* [[I]], align 4 419 // CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] 420 // CHECK1: for.end: 421 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 422 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast %class.S2* [[O1]] to i8* 423 // CHECK1-NEXT: store i8* [[TMP11]], i8** [[TMP10]], align 8 424 // CHECK1-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 425 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4 426 // CHECK1-NEXT: [[TMP14:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 427 // CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP13]], i32 1, i64 8, i8* [[TMP14]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) 428 // CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 429 // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 430 // CHECK1-NEXT: i32 2, label 
[[DOTOMP_REDUCTION_CASE2:%.*]] 431 // CHECK1-NEXT: ] 432 // CHECK1: .omp.reduction.case1: 433 // CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYIDX]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]]) 434 // CHECK1-NEXT: [[TMP16:%.*]] = bitcast %class.S2* [[ARRAYIDX]] to i8* 435 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast %class.S2* [[CALL]] to i8* 436 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false) 437 // CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP13]], [8 x i32]* @.gomp_critical_user_.reduction.var) 438 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 439 // CHECK1: .omp.reduction.case2: 440 // CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 441 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4 442 // CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP19]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 443 // CHECK1-NEXT: [[CALL2:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYIDX]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]]) 444 // CHECK1-NEXT: [[TMP20:%.*]] = bitcast %class.S2* [[ARRAYIDX]] to i8* 445 // CHECK1-NEXT: [[TMP21:%.*]] = bitcast %class.S2* [[CALL2]] to i8* 446 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP20]], i8* align 4 [[TMP21]], i64 4, i1 false) 447 // CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP19]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) 448 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 449 // CHECK1: .omp.reduction.default: 450 // CHECK1-NEXT: ret void 451 // 452 // 453 // CHECK1-LABEL: define 
{{[^@]+}}@.omp.reduction.reduction_func 454 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] { 455 // CHECK1-NEXT: entry: 456 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 457 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 458 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 459 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 460 // CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 461 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 462 // CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 463 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 464 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 465 // CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 466 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %class.S2* 467 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 468 // CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 469 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %class.S2* 470 // CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull align 4 dereferenceable(4) [[TMP11]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[TMP8]]) 471 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast %class.S2* [[TMP11]] to i8* 472 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast %class.S2* [[CALL]] to i8* 473 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i64 4, i1 false) 474 // CHECK1-NEXT: ret void 475 // 476 // 477 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55 478 // CHECK1-SAME: ([10 x [10 x [10 x double]]]* noundef nonnull align 8 dereferenceable(8000) [[B:%.*]]) #[[ATTR2]] { 479 // CHECK1-NEXT: entry: 480 // CHECK1-NEXT: [[B_ADDR:%.*]] = 
alloca [10 x [10 x [10 x double]]]*, align 8 481 // CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8 482 // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x [10 x [10 x double]]]*, [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8 483 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x [10 x [10 x double]]]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), [10 x [10 x [10 x double]]]* [[TMP0]]) 484 // CHECK1-NEXT: ret void 485 // 486 // 487 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1 488 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x [10 x [10 x double]]]* noundef nonnull align 8 dereferenceable(8000) [[B:%.*]]) #[[ATTR3]] { 489 // CHECK1-NEXT: entry: 490 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 491 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 492 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x [10 x [10 x double]]]*, align 8 493 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 494 // CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8 495 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 496 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 497 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 498 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 499 // CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 500 // CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 501 // CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [1 x %struct.kmp_taskred_input_t], align 8 502 // CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8 503 // CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8 504 // CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8 505 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], 
align 8 506 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 507 // CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8 508 // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x [10 x [10 x double]]]*, [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8 509 // CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 510 // CHECK1-NEXT: store i64 9, i64* [[DOTOMP_UB]], align 8 511 // CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 512 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 513 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 0 514 // CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX]], i64 0, i64 0 515 // CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY]], i64 2 516 // CHECK1-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX1]], i64 0, i64 0 517 // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY2]], i64 1 518 // CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 1 519 // CHECK1-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX4]], i64 0, i64 0 520 // CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY5]], i64 5 521 // CHECK1-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX6]], i64 0, i64 0 522 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY7]], i64 1 523 // CHECK1-NEXT: [[TMP1:%.*]] = ptrtoint double* [[ARRAYIDX8]] to i64 524 // CHECK1-NEXT: [[TMP2:%.*]] = ptrtoint double* [[ARRAYIDX3]] to i64 525 // CHECK1-NEXT: [[TMP3:%.*]] 
= sub i64 [[TMP1]], [[TMP2]] 526 // CHECK1-NEXT: [[TMP4:%.*]] = sdiv exact i64 [[TMP3]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64) 527 // CHECK1-NEXT: [[TMP5:%.*]] = add nuw i64 [[TMP4]], 1 528 // CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64) 529 // CHECK1-NEXT: [[TMP7:%.*]] = call i8* @llvm.stacksave() 530 // CHECK1-NEXT: store i8* [[TMP7]], i8** [[SAVED_STACK]], align 8 531 // CHECK1-NEXT: [[VLA:%.*]] = alloca double, i64 [[TMP5]], align 8 532 // CHECK1-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR0]], align 8 533 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr double, double* [[VLA]], i64 [[TMP5]] 534 // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq double* [[VLA]], [[TMP8]] 535 // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 536 // CHECK1: omp.arrayinit.body: 537 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 538 // CHECK1-NEXT: store double 0.000000e+00, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 539 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 540 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]] 541 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 542 // CHECK1: omp.arrayinit.done: 543 // CHECK1-NEXT: [[TMP9:%.*]] = bitcast [10 x [10 x [10 x double]]]* [[TMP0]] to double* 544 // CHECK1-NEXT: [[TMP10:%.*]] = ptrtoint double* [[TMP9]] to i64 545 // CHECK1-NEXT: [[TMP11:%.*]] = ptrtoint double* [[ARRAYIDX3]] to i64 546 // CHECK1-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]] 547 // CHECK1-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (double* getelementptr (double, double* null, i32 1) to 
i64) 548 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr double, double* [[VLA]], i64 [[TMP13]] 549 // CHECK1-NEXT: [[TMP15:%.*]] = bitcast double* [[TMP14]] to [10 x [10 x [10 x double]]]* 550 // CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [1 x %struct.kmp_taskred_input_t], [1 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0 551 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0 552 // CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 0 553 // CHECK1-NEXT: [[ARRAYDECAY10:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX9]], i64 0, i64 0 554 // CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY10]], i64 2 555 // CHECK1-NEXT: [[ARRAYDECAY12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i64 0, i64 0 556 // CHECK1-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY12]], i64 1 557 // CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 1 558 // CHECK1-NEXT: [[ARRAYDECAY15:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX14]], i64 0, i64 0 559 // CHECK1-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY15]], i64 5 560 // CHECK1-NEXT: [[ARRAYDECAY17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 0 561 // CHECK1-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY17]], i64 1 562 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast double* [[VLA]] to i8* 563 // CHECK1-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8 564 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds 
[[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1 565 // CHECK1-NEXT: [[TMP19:%.*]] = bitcast double* [[ARRAYIDX13]] to i8* 566 // CHECK1-NEXT: store i8* [[TMP19]], i8** [[TMP18]], align 8 567 // CHECK1-NEXT: [[TMP20:%.*]] = ptrtoint double* [[ARRAYIDX18]] to i64 568 // CHECK1-NEXT: [[TMP21:%.*]] = ptrtoint double* [[ARRAYIDX13]] to i64 569 // CHECK1-NEXT: [[TMP22:%.*]] = sub i64 [[TMP20]], [[TMP21]] 570 // CHECK1-NEXT: [[TMP23:%.*]] = sdiv exact i64 [[TMP22]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64) 571 // CHECK1-NEXT: [[TMP24:%.*]] = add nuw i64 [[TMP23]], 1 572 // CHECK1-NEXT: [[TMP25:%.*]] = mul nuw i64 [[TMP24]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64) 573 // CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2 574 // CHECK1-NEXT: store i64 [[TMP25]], i64* [[TMP26]], align 8 575 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3 576 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP27]], align 8 577 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4 578 // CHECK1-NEXT: store i8* null, i8** [[TMP28]], align 8 579 // CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5 580 // CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. 
to i8*), i8** [[TMP29]], align 8 581 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6 582 // CHECK1-NEXT: store i32 1, i32* [[TMP30]], align 8 583 // CHECK1-NEXT: [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 584 // CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4 585 // CHECK1-NEXT: [[TMP33:%.*]] = bitcast [1 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8* 586 // CHECK1-NEXT: [[TMP34:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB2]], i32 [[TMP32]], i32 1, i32 1, i8* [[TMP33]]) 587 // CHECK1-NEXT: store i8* [[TMP34]], i8** [[DOTTASK_RED_]], align 8 588 // CHECK1-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 589 // CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4 590 // CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP36]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) 591 // CHECK1-NEXT: [[TMP37:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 592 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP37]], 9 593 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 594 // CHECK1: cond.true: 595 // CHECK1-NEXT: br label [[COND_END:%.*]] 596 // CHECK1: cond.false: 597 // CHECK1-NEXT: [[TMP38:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 598 // CHECK1-NEXT: br label [[COND_END]] 599 // CHECK1: cond.end: 600 // CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP38]], [[COND_FALSE]] ] 601 // CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 602 // CHECK1-NEXT: [[TMP39:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 603 // CHECK1-NEXT: store i64 [[TMP39]], i64* [[DOTOMP_IV]], align 8 604 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 605 // CHECK1: omp.inner.for.cond: 606 // CHECK1-NEXT: [[TMP40:%.*]] = load i64, 
i64* [[DOTOMP_IV]], align 8 607 // CHECK1-NEXT: [[TMP41:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 608 // CHECK1-NEXT: [[CMP19:%.*]] = icmp sle i64 [[TMP40]], [[TMP41]] 609 // CHECK1-NEXT: br i1 [[CMP19]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] 610 // CHECK1: omp.inner.for.cond.cleanup: 611 // CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] 612 // CHECK1: omp.inner.for.body: 613 // CHECK1-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 614 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP42]], 1 615 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]] 616 // CHECK1-NEXT: store i64 [[ADD]], i64* [[I]], align 8 617 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 618 // CHECK1: omp.body.continue: 619 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 620 // CHECK1: omp.inner.for.inc: 621 // CHECK1-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 622 // CHECK1-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP43]], 1 623 // CHECK1-NEXT: store i64 [[ADD20]], i64* [[DOTOMP_IV]], align 8 624 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 625 // CHECK1: omp.inner.for.end: 626 // CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 627 // CHECK1: omp.loop.exit: 628 // CHECK1-NEXT: [[TMP44:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 629 // CHECK1-NEXT: [[TMP45:%.*]] = load i32, i32* [[TMP44]], align 4 630 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP45]]) 631 // CHECK1-NEXT: [[TMP46:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 632 // CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[TMP46]], align 4 633 // CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP47]], i32 1) 634 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 635 // CHECK1-NEXT: [[TMP49:%.*]] = bitcast double* [[VLA]] to i8* 636 // CHECK1-NEXT: store i8* [[TMP49]], i8** [[TMP48]], align 8 
637 // CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 638 // CHECK1-NEXT: [[TMP51:%.*]] = inttoptr i64 [[TMP5]] to i8* 639 // CHECK1-NEXT: store i8* [[TMP51]], i8** [[TMP50]], align 8 640 // CHECK1-NEXT: [[TMP52:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 641 // CHECK1-NEXT: [[TMP53:%.*]] = load i32, i32* [[TMP52]], align 4 642 // CHECK1-NEXT: [[TMP54:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 643 // CHECK1-NEXT: [[TMP55:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP53]], i32 1, i64 16, i8* [[TMP54]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var) 644 // CHECK1-NEXT: switch i32 [[TMP55]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 645 // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 646 // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 647 // CHECK1-NEXT: ] 648 // CHECK1: .omp.reduction.case1: 649 // CHECK1-NEXT: [[TMP56:%.*]] = getelementptr double, double* [[ARRAYIDX3]], i64 [[TMP5]] 650 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[ARRAYIDX3]], [[TMP56]] 651 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE25:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 652 // CHECK1: omp.arraycpy.body: 653 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 654 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST21:%.*]] = phi double* [ [[ARRAYIDX3]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT23:%.*]], [[OMP_ARRAYCPY_BODY]] ] 655 // CHECK1-NEXT: [[TMP57:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], align 8 656 // CHECK1-NEXT: [[TMP58:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8 657 // CHECK1-NEXT: [[ADD22:%.*]] = fadd double [[TMP57]], [[TMP58]] 658 // CHECK1-NEXT: store 
double [[ADD22]], double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], align 8 659 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT23]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 1 660 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 661 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE24:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT23]], [[TMP56]] 662 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE24]], label [[OMP_ARRAYCPY_DONE25]], label [[OMP_ARRAYCPY_BODY]] 663 // CHECK1: omp.arraycpy.done25: 664 // CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP53]], [8 x i32]* @.gomp_critical_user_.reduction.var) 665 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 666 // CHECK1: .omp.reduction.case2: 667 // CHECK1-NEXT: [[TMP59:%.*]] = getelementptr double, double* [[ARRAYIDX3]], i64 [[TMP5]] 668 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY26:%.*]] = icmp eq double* [[ARRAYIDX3]], [[TMP59]] 669 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY26]], label [[OMP_ARRAYCPY_DONE33:%.*]], label [[OMP_ARRAYCPY_BODY27:%.*]] 670 // CHECK1: omp.arraycpy.body27: 671 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST28:%.*]] = phi double* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT31:%.*]], [[OMP_ARRAYCPY_BODY27]] ] 672 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST29:%.*]] = phi double* [ [[ARRAYIDX3]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT30:%.*]], [[OMP_ARRAYCPY_BODY27]] ] 673 // CHECK1-NEXT: [[TMP60:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST28]], align 8 674 // CHECK1-NEXT: [[TMP61:%.*]] = atomicrmw fadd double* [[OMP_ARRAYCPY_DESTELEMENTPAST29]], double [[TMP60]] monotonic, align 8 675 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT30]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST29]], i32 1 676 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT31]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST28]], 
i32 1 677 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE32:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT30]], [[TMP59]] 678 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE32]], label [[OMP_ARRAYCPY_DONE33]], label [[OMP_ARRAYCPY_BODY27]] 679 // CHECK1: omp.arraycpy.done33: 680 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 681 // CHECK1: .omp.reduction.default: 682 // CHECK1-NEXT: [[TMP62:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 683 // CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP62]]) 684 // CHECK1-NEXT: ret void 685 // 686 // 687 // CHECK1-LABEL: define {{[^@]+}}@.red_init. 688 // CHECK1-SAME: (i8* noalias noundef [[TMP0:%.*]], i8* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] { 689 // CHECK1-NEXT: entry: 690 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 691 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 692 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 693 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 694 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to double** 695 // CHECK1-NEXT: [[TMP3:%.*]] = load double*, double** [[TMP2]], align 8 696 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* @{{reduction_size[.].+[.]}}, align 8 697 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr double, double* [[TMP3]], i64 [[TMP4]] 698 // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq double* [[TMP3]], [[TMP5]] 699 // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 700 // CHECK1: omp.arrayinit.body: 701 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 702 // CHECK1-NEXT: store double 0.000000e+00, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 703 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 704 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* 
[[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP5]] 705 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 706 // CHECK1: omp.arrayinit.done: 707 // CHECK1-NEXT: ret void 708 // 709 // 710 // CHECK1-LABEL: define {{[^@]+}}@.red_comb. 711 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4]] { 712 // CHECK1-NEXT: entry: 713 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 714 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 715 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 716 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 717 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* @{{reduction_size[.].+[.]}}, align 8 718 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[DOTADDR]] to double** 719 // CHECK1-NEXT: [[TMP4:%.*]] = load double*, double** [[TMP3]], align 8 720 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8** [[DOTADDR1]] to double** 721 // CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[TMP5]], align 8 722 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr double, double* [[TMP4]], i64 [[TMP2]] 723 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[TMP4]], [[TMP7]] 724 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 725 // CHECK1: omp.arraycpy.body: 726 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[TMP6]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 727 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 728 // CHECK1-NEXT: [[TMP8:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 729 // CHECK1-NEXT: [[TMP9:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8 730 // CHECK1-NEXT: [[ADD:%.*]] = fadd double [[TMP8]], [[TMP9]] 731 // CHECK1-NEXT: store double [[ADD]], double* 
[[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 732 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 733 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 734 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP7]] 735 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 736 // CHECK1: omp.arraycpy.done2: 737 // CHECK1-NEXT: ret void 738 // 739 // 740 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2 741 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4]] { 742 // CHECK1-NEXT: entry: 743 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 744 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 745 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 746 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 747 // CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 748 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]* 749 // CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 750 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]* 751 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0 752 // CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 753 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double* 754 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0 755 // CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 756 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double* 757 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1 758 // CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8 759 // CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint i8* 
[[TMP13]] to i64 760 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr double, double* [[TMP11]], i64 [[TMP14]] 761 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[TMP11]], [[TMP15]] 762 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 763 // CHECK1: omp.arraycpy.body: 764 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 765 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 766 // CHECK1-NEXT: [[TMP16:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 767 // CHECK1-NEXT: [[TMP17:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8 768 // CHECK1-NEXT: [[ADD:%.*]] = fadd double [[TMP16]], [[TMP17]] 769 // CHECK1-NEXT: store double [[ADD]], double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 770 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 771 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 772 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP15]] 773 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 774 // CHECK1: omp.arraycpy.done2: 775 // CHECK1-NEXT: ret void 776 // 777 // 778 // CHECK1-LABEL: define {{[^@]+}}@_ZN2S2C2Ev 779 // CHECK1-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 { 780 // CHECK1-NEXT: entry: 781 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8 782 // CHECK1-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8 783 // CHECK1-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8 784 
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[THIS1]], i32 0, i32 0 785 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 786 // CHECK1-NEXT: ret void 787 // 788 // 789 // CHECK1-LABEL: define {{[^@]+}}@main 790 // CHECK1-SAME: () #[[ATTR10:[0-9]+]] { 791 // CHECK1-NEXT: entry: 792 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 793 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 794 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4 795 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3barv() 796 // CHECK1-NEXT: store i32 [[CALL]], i32* [[A]], align 4 797 // CHECK1-NEXT: ret i32 0 798 // 799 // 800 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 801 // CHECK1-SAME: () #[[ATTR11:[0-9]+]] { 802 // CHECK1-NEXT: entry: 803 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1) 804 // CHECK1-NEXT: ret void 805 // 806 // 807 // CHECK2-LABEL: define {{[^@]+}}@_Z3sumPiiS_ 808 // CHECK2-SAME: (i32* noundef [[INPUT:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]]) #[[ATTR0:[0-9]+]] { 809 // CHECK2-NEXT: entry: 810 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 811 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 812 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 813 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 814 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 815 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 816 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 817 // CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 818 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 819 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 820 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 821 // CHECK2-NEXT: [[SIZE_CASTED4:%.*]] = alloca i32, align 4 822 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [3 x i8*], align 4 823 // CHECK2-NEXT: 
[[DOTOFFLOAD_PTRS8:%.*]] = alloca [3 x i8*], align 4 824 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [3 x i8*], align 4 825 // CHECK2-NEXT: [[DOTOFFLOAD_SIZES10:%.*]] = alloca [3 x i64], align 4 826 // CHECK2-NEXT: [[_TMP11:%.*]] = alloca i32, align 4 827 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4 828 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_13:%.*]] = alloca i32, align 4 829 // CHECK2-NEXT: [[A:%.*]] = alloca [10 x i32], align 4 830 // CHECK2-NEXT: [[SIZE_CASTED20:%.*]] = alloca i32, align 4 831 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x i8*], align 4 832 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x i8*], align 4 833 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x i8*], align 4 834 // CHECK2-NEXT: [[SIZE_CASTED27:%.*]] = alloca i32, align 4 835 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [2 x i8*], align 4 836 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [2 x i8*], align 4 837 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [2 x i8*], align 4 838 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 839 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 840 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 841 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 842 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4 843 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 844 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 845 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 846 // CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 847 // CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 848 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i32 0 849 // CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 850 // CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** 
[[INPUT_ADDR]], align 4 851 // CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0 852 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 853 // CHECK2-NEXT: [[TMP9:%.*]] = mul nuw i32 [[TMP8]], 4 854 // CHECK2-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64 855 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES]] to i8* 856 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP11]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes to i8*), i32 24, i1 false) 857 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 858 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 859 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[TMP13]], align 4 860 // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 861 // CHECK2-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 862 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[TMP15]], align 4 863 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 864 // CHECK2-NEXT: store i8* null, i8** [[TMP16]], align 4 865 // CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 866 // CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32** 867 // CHECK2-NEXT: store i32* [[TMP4]], i32** [[TMP18]], align 4 868 // CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 869 // CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32** 870 // CHECK2-NEXT: store i32* [[ARRAYIDX]], i32** [[TMP20]], align 4 871 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 872 // CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 4 873 // CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 874 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32** 875 // CHECK2-NEXT: store i32* [[TMP6]], i32** [[TMP23]], align 4 876 // CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 877 // CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32** 878 // CHECK2-NEXT: store i32* [[ARRAYIDX1]], i32** [[TMP25]], align 4 879 // CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 880 // CHECK2-NEXT: store i64 [[TMP10]], i64* [[TMP26]], align 4 881 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 882 // CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 4 883 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 884 // CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 885 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 886 // CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 887 // CHECK2-NEXT: store i32 [[TMP31]], i32* [[DOTCAPTURE_EXPR_]], align 4 888 // CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 889 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP32]], 0 890 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 891 // CHECK2-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 892 // CHECK2-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 893 // CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 894 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP33]], 1 895 // CHECK2-NEXT: [[TMP34:%.*]] = zext i32 [[ADD]] to i64 896 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP34]]) 897 // CHECK2-NEXT: [[TMP35:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69.region_id, i32 3, i8** [[TMP28]], i8** [[TMP29]], i64* [[TMP30]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) 898 // CHECK2-NEXT: [[TMP36:%.*]] = icmp ne i32 [[TMP35]], 0 899 // CHECK2-NEXT: br i1 [[TMP36]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 900 // CHECK2: omp_offload.failed: 901 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69(i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]]) #[[ATTR2:[0-9]+]] 902 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 903 // CHECK2: omp_offload.cont: 904 // CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 905 // CHECK2-NEXT: store i32 [[TMP37]], i32* [[SIZE_CASTED4]], align 4 906 // CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[SIZE_CASTED4]], align 4 907 // CHECK2-NEXT: [[TMP39:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 908 // CHECK2-NEXT: [[TMP40:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 909 // CHECK2-NEXT: [[TMP41:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 910 // CHECK2-NEXT: [[TMP42:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 911 // CHECK2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP42]], i32 0 912 // CHECK2-NEXT: [[TMP43:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 913 // CHECK2-NEXT: [[TMP44:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 914 // CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP44]], i32 0 915 // CHECK2-NEXT: [[TMP45:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 916 // CHECK2-NEXT: [[TMP46:%.*]] = mul nuw i32 [[TMP45]], 4 917 // CHECK2-NEXT: [[TMP47:%.*]] = sext i32 [[TMP46]] to i64 918 // CHECK2-NEXT: [[TMP48:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES10]] to i8* 919 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP48]], i8* align 4 bitcast ([3 
x i64]* @.offload_sizes.7 to i8*), i32 24, i1 false) 920 // CHECK2-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 921 // CHECK2-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32* 922 // CHECK2-NEXT: store i32 [[TMP38]], i32* [[TMP50]], align 4 923 // CHECK2-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 924 // CHECK2-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32* 925 // CHECK2-NEXT: store i32 [[TMP38]], i32* [[TMP52]], align 4 926 // CHECK2-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0 927 // CHECK2-NEXT: store i8* null, i8** [[TMP53]], align 4 928 // CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1 929 // CHECK2-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32** 930 // CHECK2-NEXT: store i32* [[TMP41]], i32** [[TMP55]], align 4 931 // CHECK2-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1 932 // CHECK2-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to i32** 933 // CHECK2-NEXT: store i32* [[ARRAYIDX5]], i32** [[TMP57]], align 4 934 // CHECK2-NEXT: [[TMP58:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1 935 // CHECK2-NEXT: store i8* null, i8** [[TMP58]], align 4 936 // CHECK2-NEXT: [[TMP59:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 2 937 // CHECK2-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32** 938 // CHECK2-NEXT: store i32* [[TMP43]], i32** [[TMP60]], align 4 939 // CHECK2-NEXT: [[TMP61:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 2 940 // CHECK2-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32** 941 // CHECK2-NEXT: store i32* [[ARRAYIDX6]], i32** [[TMP62]], align 4 942 // CHECK2-NEXT: [[TMP63:%.*]] = getelementptr inbounds [3 x 
i64], [3 x i64]* [[DOTOFFLOAD_SIZES10]], i32 0, i32 2 943 // CHECK2-NEXT: store i64 [[TMP47]], i64* [[TMP63]], align 4 944 // CHECK2-NEXT: [[TMP64:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 2 945 // CHECK2-NEXT: store i8* null, i8** [[TMP64]], align 4 946 // CHECK2-NEXT: [[TMP65:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 947 // CHECK2-NEXT: [[TMP66:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 948 // CHECK2-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES10]], i32 0, i32 0 949 // CHECK2-NEXT: [[TMP68:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 950 // CHECK2-NEXT: store i32 [[TMP68]], i32* [[DOTCAPTURE_EXPR_12]], align 4 951 // CHECK2-NEXT: [[TMP69:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4 952 // CHECK2-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP69]], 0 953 // CHECK2-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1 954 // CHECK2-NEXT: [[SUB16:%.*]] = sub nsw i32 [[DIV15]], 1 955 // CHECK2-NEXT: store i32 [[SUB16]], i32* [[DOTCAPTURE_EXPR_13]], align 4 956 // CHECK2-NEXT: [[TMP70:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_13]], align 4 957 // CHECK2-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP70]], 1 958 // CHECK2-NEXT: [[TMP71:%.*]] = zext i32 [[ADD17]] to i64 959 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i64 [[TMP71]]) 960 // CHECK2-NEXT: [[TMP72:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73.region_id, i32 3, i8** [[TMP65]], i8** [[TMP66]], i64* [[TMP67]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) 961 // CHECK2-NEXT: [[TMP73:%.*]] = icmp ne i32 [[TMP72]], 0 962 // CHECK2-NEXT: br i1 [[TMP73]], label [[OMP_OFFLOAD_FAILED18:%.*]], label [[OMP_OFFLOAD_CONT19:%.*]] 
963 // CHECK2: omp_offload.failed18: 964 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73(i32 [[TMP38]], i32* [[TMP39]], i32* [[TMP40]]) #[[ATTR2]] 965 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT19]] 966 // CHECK2: omp_offload.cont19: 967 // CHECK2-NEXT: [[TMP74:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 968 // CHECK2-NEXT: store i32 [[TMP74]], i32* [[SIZE_CASTED20]], align 4 969 // CHECK2-NEXT: [[TMP75:%.*]] = load i32, i32* [[SIZE_CASTED20]], align 4 970 // CHECK2-NEXT: [[ARRAYIDX21:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 0 971 // CHECK2-NEXT: [[TMP76:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 972 // CHECK2-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* 973 // CHECK2-NEXT: store i32 [[TMP75]], i32* [[TMP77]], align 4 974 // CHECK2-NEXT: [[TMP78:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 975 // CHECK2-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* 976 // CHECK2-NEXT: store i32 [[TMP75]], i32* [[TMP79]], align 4 977 // CHECK2-NEXT: [[TMP80:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i32 0, i32 0 978 // CHECK2-NEXT: store i8* null, i8** [[TMP80]], align 4 979 // CHECK2-NEXT: [[TMP81:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 980 // CHECK2-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to [10 x i32]** 981 // CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP82]], align 4 982 // CHECK2-NEXT: [[TMP83:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 983 // CHECK2-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** 984 // CHECK2-NEXT: store i32* [[ARRAYIDX21]], i32** [[TMP84]], align 4 985 // CHECK2-NEXT: [[TMP85:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i32 0, i32 1 986 // CHECK2-NEXT: store i8* null, i8** [[TMP85]], align 4 987 
// CHECK2-NEXT: [[TMP86:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 988 // CHECK2-NEXT: [[TMP87:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 989 // CHECK2-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78.region_id, i32 2, i8** [[TMP86]], i8** [[TMP87]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 990 // CHECK2-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 991 // CHECK2-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] 992 // CHECK2: omp_offload.failed25: 993 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78(i32 [[TMP75]], [10 x i32]* [[A]]) #[[ATTR2]] 994 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT26]] 995 // CHECK2: omp_offload.cont26: 996 // CHECK2-NEXT: [[TMP90:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 997 // CHECK2-NEXT: store i32 [[TMP90]], i32* [[SIZE_CASTED27]], align 4 998 // CHECK2-NEXT: [[TMP91:%.*]] = load i32, i32* [[SIZE_CASTED27]], align 4 999 // CHECK2-NEXT: [[ARRAYIDX28:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 3 1000 // CHECK2-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 1001 // CHECK2-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to i32* 1002 // CHECK2-NEXT: store i32 [[TMP91]], i32* [[TMP93]], align 4 1003 // CHECK2-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 1004 // CHECK2-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i32* 1005 // CHECK2-NEXT: store i32 [[TMP91]], i32* [[TMP95]], align 4 1006 // CHECK2-NEXT: [[TMP96:%.*]] = getelementptr inbounds [2 x 
i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 0 1007 // CHECK2-NEXT: store i8* null, i8** [[TMP96]], align 4 1008 // CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 1009 // CHECK2-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to [10 x i32]** 1010 // CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP98]], align 4 1011 // CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 1012 // CHECK2-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32** 1013 // CHECK2-NEXT: store i32* [[ARRAYIDX28]], i32** [[TMP100]], align 4 1014 // CHECK2-NEXT: [[TMP101:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 1 1015 // CHECK2-NEXT: store i8* null, i8** [[TMP101]], align 4 1016 // CHECK2-NEXT: [[TMP102:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 1017 // CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 1018 // CHECK2-NEXT: [[TMP104:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81.region_id, i32 2, i8** [[TMP102]], i8** [[TMP103]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) 1019 // CHECK2-NEXT: [[TMP105:%.*]] = icmp ne i32 [[TMP104]], 0 1020 // CHECK2-NEXT: br i1 [[TMP105]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] 1021 // CHECK2: omp_offload.failed32: 1022 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81(i32 [[TMP91]], [10 x i32]* [[A]]) #[[ATTR2]] 1023 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT33]] 1024 // CHECK2: omp_offload.cont33: 1025 // CHECK2-NEXT: ret void 1026 // 1027 // 
1028 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69 1029 // CHECK2-SAME: (i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1:[0-9]+]] { 1030 // CHECK2-NEXT: entry: 1031 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1032 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1033 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1034 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1035 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1036 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1037 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1038 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1039 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4 1040 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1041 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1042 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1043 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]]) 1044 // CHECK2-NEXT: ret void 1045 // 1046 // 1047 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined. 
1048 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1049 // CHECK2-NEXT: entry: 1050 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1051 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1052 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1053 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1054 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1055 // CHECK2-NEXT: [[OUTPUT1:%.*]] = alloca i32, align 4 1056 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 4 1057 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1058 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 1059 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1060 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 1061 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1062 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 1063 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 1064 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1065 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1066 // CHECK2-NEXT: [[I5:%.*]] = alloca i32, align 4 1067 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1068 // CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 1069 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1070 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1071 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1072 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1073 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1074 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1075 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* 
[[TMP0]], i32 0 1076 // CHECK2-NEXT: store i32 0, i32* [[OUTPUT1]], align 4 1077 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1078 // CHECK2-NEXT: [[TMP2:%.*]] = ptrtoint i32* [[TMP1]] to i64 1079 // CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 1080 // CHECK2-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]] 1081 // CHECK2-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 1082 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr i32, i32* [[OUTPUT1]], i64 [[TMP5]] 1083 // CHECK2-NEXT: store i32* [[TMP6]], i32** [[TMP]], align 4 1084 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1085 // CHECK2-NEXT: store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_]], align 4 1086 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1087 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP8]], 0 1088 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 1089 // CHECK2-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1 1090 // CHECK2-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4 1091 // CHECK2-NEXT: store i32 0, i32* [[I]], align 4 1092 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1093 // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] 1094 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1095 // CHECK2: omp.precond.then: 1096 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 1097 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1098 // CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_COMB_UB]], align 4 1099 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1100 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1101 // CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1102 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1103 // CHECK2-NEXT: call void 
@__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP12]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1104 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1105 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1106 // CHECK2-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]] 1107 // CHECK2-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1108 // CHECK2: cond.true: 1109 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1110 // CHECK2-NEXT: br label [[COND_END:%.*]] 1111 // CHECK2: cond.false: 1112 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1113 // CHECK2-NEXT: br label [[COND_END]] 1114 // CHECK2: cond.end: 1115 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ] 1116 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 1117 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1118 // CHECK2-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4 1119 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1120 // CHECK2: omp.inner.for.cond: 1121 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1122 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1123 // CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]] 1124 // CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1125 // CHECK2: omp.inner.for.body: 1126 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1127 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1128 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1129 // CHECK2-NEXT: store i32 [[TMP22]], i32* [[SIZE_CASTED]], align 4 1130 // CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* 
[[SIZE_CASTED]], align 4 1131 // CHECK2-NEXT: [[TMP24:%.*]] = load i32*, i32** [[TMP]], align 4 1132 // CHECK2-NEXT: [[TMP25:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1133 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP20]], i32 [[TMP21]], i32 [[TMP23]], i32* [[TMP24]], i32* [[TMP25]]) 1134 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1135 // CHECK2: omp.inner.for.inc: 1136 // CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1137 // CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 1138 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP26]], [[TMP27]] 1139 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 1140 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 1141 // CHECK2: omp.inner.for.end: 1142 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1143 // CHECK2: omp.loop.exit: 1144 // CHECK2-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1145 // CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4 1146 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP29]]) 1147 // CHECK2-NEXT: br label [[OMP_PRECOND_END]] 1148 // CHECK2: omp.precond.end: 1149 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 1150 // CHECK2-NEXT: [[TMP31:%.*]] = bitcast i32* [[OUTPUT1]] to i8* 1151 // CHECK2-NEXT: store i8* [[TMP31]], i8** [[TMP30]], align 4 1152 // CHECK2-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1153 // CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4 1154 // CHECK2-NEXT: [[TMP34:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1155 // CHECK2-NEXT: [[TMP35:%.*]] = call i32 
@__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP33]], i32 1, i32 4, i8* [[TMP34]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var) 1156 // CHECK2-NEXT: switch i32 [[TMP35]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1157 // CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1158 // CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1159 // CHECK2-NEXT: ] 1160 // CHECK2: .omp.reduction.case1: 1161 // CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 1162 // CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[OUTPUT1]], align 4 1163 // CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]] 1164 // CHECK2-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4 1165 // CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP33]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1166 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1167 // CHECK2: .omp.reduction.case2: 1168 // CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[OUTPUT1]], align 4 1169 // CHECK2-NEXT: [[TMP39:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP38]] monotonic, align 4 1170 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1171 // CHECK2: .omp.reduction.default: 1172 // CHECK2-NEXT: ret void 1173 // 1174 // 1175 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1 1176 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1177 // CHECK2-NEXT: entry: 1178 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1179 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1180 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4 1181 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4 1182 // CHECK2-NEXT: 
[[SIZE_ADDR:%.*]] = alloca i32, align 4 1183 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1184 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1185 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1186 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1187 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1188 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1189 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1190 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1191 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1192 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1193 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1194 // CHECK2-NEXT: [[OUTPUT3:%.*]] = alloca i32, align 4 1195 // CHECK2-NEXT: [[_TMP4:%.*]] = alloca i32*, align 4 1196 // CHECK2-NEXT: [[I5:%.*]] = alloca i32, align 4 1197 // CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 1198 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1199 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1200 // CHECK2-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4 1201 // CHECK2-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4 1202 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1203 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1204 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1205 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1206 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 1207 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1208 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0 1209 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 1210 // CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 1211 // CHECK2-NEXT: store i32 
[[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1212 // CHECK2-NEXT: store i32 0, i32* [[I]], align 4 1213 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1214 // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]] 1215 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1216 // CHECK2: omp.precond.then: 1217 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1218 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1219 // CHECK2-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4 1220 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4 1221 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4 1222 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_LB]], align 4 1223 // CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4 1224 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1225 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1226 // CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1227 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 0 1228 // CHECK2-NEXT: store i32 0, i32* [[OUTPUT3]], align 4 1229 // CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1230 // CHECK2-NEXT: [[TMP8:%.*]] = ptrtoint i32* [[TMP7]] to i64 1231 // CHECK2-NEXT: [[TMP9:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 1232 // CHECK2-NEXT: [[TMP10:%.*]] = sub i64 [[TMP8]], [[TMP9]] 1233 // CHECK2-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 1234 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[OUTPUT3]], i64 [[TMP11]] 1235 // CHECK2-NEXT: store i32* [[TMP12]], i32** [[_TMP4]], align 4 1236 // CHECK2-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1237 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4 1238 // CHECK2-NEXT: call 
void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP14]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1239 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1240 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1241 // CHECK2-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP15]], [[TMP16]] 1242 // CHECK2-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1243 // CHECK2: cond.true: 1244 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1245 // CHECK2-NEXT: br label [[COND_END:%.*]] 1246 // CHECK2: cond.false: 1247 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1248 // CHECK2-NEXT: br label [[COND_END]] 1249 // CHECK2: cond.end: 1250 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP17]], [[COND_TRUE]] ], [ [[TMP18]], [[COND_FALSE]] ] 1251 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1252 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1253 // CHECK2-NEXT: store i32 [[TMP19]], i32* [[DOTOMP_IV]], align 4 1254 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1255 // CHECK2: omp.inner.for.cond: 1256 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1257 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1258 // CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] 1259 // CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1260 // CHECK2: omp.inner.for.body: 1261 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1262 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1 1263 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1264 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I5]], align 4 1265 // CHECK2-NEXT: [[TMP23:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1266 // CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[I5]], 
align 4 1267 // CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]] 1268 // CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4 1269 // CHECK2-NEXT: [[TMP26:%.*]] = load i32*, i32** [[_TMP4]], align 4 1270 // CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 0 1271 // CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4 1272 // CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP27]], [[TMP25]] 1273 // CHECK2-NEXT: store i32 [[ADD10]], i32* [[ARRAYIDX9]], align 4 1274 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1275 // CHECK2: omp.body.continue: 1276 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1277 // CHECK2: omp.inner.for.inc: 1278 // CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1279 // CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP28]], 1 1280 // CHECK2-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4 1281 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 1282 // CHECK2: omp.inner.for.end: 1283 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1284 // CHECK2: omp.loop.exit: 1285 // CHECK2-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1286 // CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 1287 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]]) 1288 // CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 1289 // CHECK2-NEXT: [[TMP32:%.*]] = bitcast i32* [[OUTPUT3]] to i8* 1290 // CHECK2-NEXT: store i8* [[TMP32]], i8** [[TMP31]], align 4 1291 // CHECK2-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1292 // CHECK2-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4 1293 // CHECK2-NEXT: [[TMP35:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1294 // CHECK2-NEXT: [[TMP36:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 
[[TMP34]], i32 1, i32 4, i8* [[TMP35]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) 1295 // CHECK2-NEXT: switch i32 [[TMP36]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1296 // CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1297 // CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1298 // CHECK2-NEXT: ] 1299 // CHECK2: .omp.reduction.case1: 1300 // CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 1301 // CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[OUTPUT3]], align 4 1302 // CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP37]], [[TMP38]] 1303 // CHECK2-NEXT: store i32 [[ADD12]], i32* [[ARRAYIDX]], align 4 1304 // CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP34]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1305 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1306 // CHECK2: .omp.reduction.case2: 1307 // CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[OUTPUT3]], align 4 1308 // CHECK2-NEXT: [[TMP40:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP39]] monotonic, align 4 1309 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1310 // CHECK2: .omp.reduction.default: 1311 // CHECK2-NEXT: br label [[OMP_PRECOND_END]] 1312 // CHECK2: omp.precond.end: 1313 // CHECK2-NEXT: ret void 1314 // 1315 // 1316 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func 1317 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] { 1318 // CHECK2-NEXT: entry: 1319 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 1320 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 1321 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 1322 // CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 1323 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 1324 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1325 // CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** 
[[DOTADDR1]], align 4 1326 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1327 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 1328 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 1329 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 1330 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 1331 // CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 1332 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 1333 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1334 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4 1335 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 1336 // CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 1337 // CHECK2-NEXT: ret void 1338 // 1339 // 1340 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2 1341 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 1342 // CHECK2-NEXT: entry: 1343 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 1344 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 1345 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 1346 // CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 1347 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 1348 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1349 // CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 1350 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1351 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 1352 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 1353 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 1354 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 1355 // CHECK2-NEXT: [[TMP10:%.*]] = 
load i8*, i8** [[TMP9]], align 4 1356 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 1357 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1358 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4 1359 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 1360 // CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 1361 // CHECK2-NEXT: ret void 1362 // 1363 // 1364 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73 1365 // CHECK2-SAME: (i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1366 // CHECK2-NEXT: entry: 1367 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1368 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1369 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1370 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1371 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1372 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1373 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1374 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1375 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4 1376 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1377 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1378 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1379 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]]) 1380 // CHECK2-NEXT: ret void 1381 // 1382 // 1383 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3 1384 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1385 // CHECK2-NEXT: entry: 1386 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1387 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1388 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1389 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1390 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1391 // CHECK2-NEXT: [[OUTPUT2:%.*]] = alloca [3 x i32], align 4 1392 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 4 1393 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1394 // CHECK2-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 1395 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1396 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4 1397 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1398 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 1399 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 1400 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1401 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1402 // CHECK2-NEXT: [[I6:%.*]] = alloca i32, align 4 1403 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1404 // CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 1405 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1406 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1407 // CHECK2-NEXT: store i32 [[SIZE]], 
i32* [[SIZE_ADDR]], align 4 1408 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1409 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1410 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1411 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 0 1412 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1413 // CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 2 1414 // CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[OUTPUT2]], i32 0, i32 0 1415 // CHECK2-NEXT: [[TMP2:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 3 1416 // CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP2]] 1417 // CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 1418 // CHECK2: omp.arrayinit.body: 1419 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 1420 // CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 1421 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 1422 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP2]] 1423 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 1424 // CHECK2: omp.arrayinit.done: 1425 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1426 // CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64 1427 // CHECK2-NEXT: [[TMP5:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 1428 // CHECK2-NEXT: [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]] 1429 // CHECK2-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 1430 // CHECK2-NEXT: 
[[TMP8:%.*]] = bitcast [3 x i32]* [[OUTPUT2]] to i32* 1431 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr i32, i32* [[TMP8]], i64 [[TMP7]] 1432 // CHECK2-NEXT: store i32* [[TMP9]], i32** [[TMP]], align 4 1433 // CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [3 x i32]* [[OUTPUT2]] to i32* 1434 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1435 // CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR_]], align 4 1436 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1437 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], 0 1438 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 1439 // CHECK2-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1 1440 // CHECK2-NEXT: store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_4]], align 4 1441 // CHECK2-NEXT: store i32 0, i32* [[I]], align 4 1442 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1443 // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP12]] 1444 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1445 // CHECK2: omp.precond.then: 1446 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 1447 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 1448 // CHECK2-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_COMB_UB]], align 4 1449 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1450 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1451 // CHECK2-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1452 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4 1453 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP15]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1454 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1455 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 
1456 // CHECK2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP16]], [[TMP17]] 1457 // CHECK2-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1458 // CHECK2: cond.true: 1459 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 1460 // CHECK2-NEXT: br label [[COND_END:%.*]] 1461 // CHECK2: cond.false: 1462 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1463 // CHECK2-NEXT: br label [[COND_END]] 1464 // CHECK2: cond.end: 1465 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP18]], [[COND_TRUE]] ], [ [[TMP19]], [[COND_FALSE]] ] 1466 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 1467 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1468 // CHECK2-NEXT: store i32 [[TMP20]], i32* [[DOTOMP_IV]], align 4 1469 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1470 // CHECK2: omp.inner.for.cond: 1471 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1472 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1473 // CHECK2-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]] 1474 // CHECK2-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1475 // CHECK2: omp.inner.for.body: 1476 // CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1477 // CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1478 // CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1479 // CHECK2-NEXT: store i32 [[TMP25]], i32* [[SIZE_CASTED]], align 4 1480 // CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1481 // CHECK2-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP]], align 4 1482 // CHECK2-NEXT: [[TMP28:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1483 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP23]], i32 [[TMP24]], i32 [[TMP26]], i32* [[TMP27]], i32* [[TMP28]]) 1484 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1485 // CHECK2: omp.inner.for.inc: 1486 // CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1487 // CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 1488 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], [[TMP30]] 1489 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 1490 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 1491 // CHECK2: omp.inner.for.end: 1492 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1493 // CHECK2: omp.loop.exit: 1494 // CHECK2-NEXT: [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1495 // CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4 1496 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP32]]) 1497 // CHECK2-NEXT: br label [[OMP_PRECOND_END]] 1498 // CHECK2: omp.precond.end: 1499 // CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 1500 // CHECK2-NEXT: [[TMP34:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 1501 // CHECK2-NEXT: store i8* [[TMP34]], i8** [[TMP33]], align 4 1502 // CHECK2-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1503 // CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4 1504 // CHECK2-NEXT: [[TMP37:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1505 // CHECK2-NEXT: [[TMP38:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], i32 1, i32 4, i8* [[TMP37]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var) 1506 // CHECK2-NEXT: switch i32 [[TMP38]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1507 // 
CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1508 // CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1509 // CHECK2-NEXT: ] 1510 // CHECK2: .omp.reduction.case1: 1511 // CHECK2-NEXT: [[TMP39:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3 1512 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP39]] 1513 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE13:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 1514 // CHECK2: omp.arraycpy.body: 1515 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1516 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST9:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1517 // CHECK2-NEXT: [[TMP40:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], align 4 1518 // CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 1519 // CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP40]], [[TMP41]] 1520 // CHECK2-NEXT: store i32 [[ADD10]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], align 4 1521 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], i32 1 1522 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 1523 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE12:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP39]] 1524 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE12]], label [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_BODY]] 1525 // CHECK2: omp.arraycpy.done13: 1526 // CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1527 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1528 // CHECK2: .omp.reduction.case2: 1529 // CHECK2-NEXT: [[TMP42:%.*]] = getelementptr 
i32, i32* [[ARRAYIDX]], i32 3 1530 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY14:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP42]] 1531 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY14]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY15:%.*]] 1532 // CHECK2: omp.arraycpy.body15: 1533 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST16:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT19:%.*]], [[OMP_ARRAYCPY_BODY15]] ] 1534 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST17:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT18:%.*]], [[OMP_ARRAYCPY_BODY15]] ] 1535 // CHECK2-NEXT: [[TMP43:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST16]], align 4 1536 // CHECK2-NEXT: [[TMP44:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST17]], i32 [[TMP43]] monotonic, align 4 1537 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT18]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST17]], i32 1 1538 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT19]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST16]], i32 1 1539 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT18]], [[TMP42]] 1540 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY15]] 1541 // CHECK2: omp.arraycpy.done21: 1542 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1543 // CHECK2: .omp.reduction.default: 1544 // CHECK2-NEXT: ret void 1545 // 1546 // 1547 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4 1548 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1549 // CHECK2-NEXT: entry: 1550 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1551 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = 
alloca i32*, align 4 1552 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4 1553 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4 1554 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1555 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1556 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1557 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1558 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1559 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1560 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1561 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1562 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1563 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1564 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1565 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1566 // CHECK2-NEXT: [[OUTPUT4:%.*]] = alloca [3 x i32], align 4 1567 // CHECK2-NEXT: [[_TMP5:%.*]] = alloca i32*, align 4 1568 // CHECK2-NEXT: [[I6:%.*]] = alloca i32, align 4 1569 // CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 1570 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1571 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1572 // CHECK2-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4 1573 // CHECK2-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4 1574 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1575 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1576 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1577 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1578 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 1579 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1580 // 
CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0 1581 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 1582 // CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 1583 // CHECK2-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1584 // CHECK2-NEXT: store i32 0, i32* [[I]], align 4 1585 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1586 // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]] 1587 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1588 // CHECK2: omp.precond.then: 1589 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1590 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1591 // CHECK2-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4 1592 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4 1593 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4 1594 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_LB]], align 4 1595 // CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4 1596 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1597 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1598 // CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1599 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 0 1600 // CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1601 // CHECK2-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 2 1602 // CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[OUTPUT4]], i32 0, i32 0 1603 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 3 1604 // CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP8]] 1605 // CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 1606 // CHECK2: 
omp.arrayinit.body: 1607 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[OMP_PRECOND_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 1608 // CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 1609 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 1610 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]] 1611 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 1612 // CHECK2: omp.arrayinit.done: 1613 // CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1614 // CHECK2-NEXT: [[TMP10:%.*]] = ptrtoint i32* [[TMP9]] to i64 1615 // CHECK2-NEXT: [[TMP11:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 1616 // CHECK2-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]] 1617 // CHECK2-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 1618 // CHECK2-NEXT: [[TMP14:%.*]] = bitcast [3 x i32]* [[OUTPUT4]] to i32* 1619 // CHECK2-NEXT: [[TMP15:%.*]] = getelementptr i32, i32* [[TMP14]], i64 [[TMP13]] 1620 // CHECK2-NEXT: store i32* [[TMP15]], i32** [[_TMP5]], align 4 1621 // CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [3 x i32]* [[OUTPUT4]] to i32* 1622 // CHECK2-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1623 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4 1624 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP17]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1625 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1626 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1627 // CHECK2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP18]], [[TMP19]] 1628 // CHECK2-NEXT: br i1 [[CMP7]], label 
[[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1629 // CHECK2: cond.true: 1630 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1631 // CHECK2-NEXT: br label [[COND_END:%.*]] 1632 // CHECK2: cond.false: 1633 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1634 // CHECK2-NEXT: br label [[COND_END]] 1635 // CHECK2: cond.end: 1636 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP20]], [[COND_TRUE]] ], [ [[TMP21]], [[COND_FALSE]] ] 1637 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1638 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1639 // CHECK2-NEXT: store i32 [[TMP22]], i32* [[DOTOMP_IV]], align 4 1640 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1641 // CHECK2: omp.inner.for.cond: 1642 // CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1643 // CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1644 // CHECK2-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]] 1645 // CHECK2-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1646 // CHECK2: omp.inner.for.body: 1647 // CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1648 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP25]], 1 1649 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1650 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I6]], align 4 1651 // CHECK2-NEXT: [[TMP26:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1652 // CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[I6]], align 4 1653 // CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]] 1654 // CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4 1655 // CHECK2-NEXT: [[TMP29:%.*]] = load i32*, i32** [[_TMP5]], align 4 1656 // CHECK2-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 0 1657 // CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX10]], align 4 1658 // CHECK2-NEXT: [[ADD11:%.*]] = add 
nsw i32 [[TMP30]], [[TMP28]] 1659 // CHECK2-NEXT: store i32 [[ADD11]], i32* [[ARRAYIDX10]], align 4 1660 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1661 // CHECK2: omp.body.continue: 1662 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1663 // CHECK2: omp.inner.for.inc: 1664 // CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1665 // CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP31]], 1 1666 // CHECK2-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4 1667 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 1668 // CHECK2: omp.inner.for.end: 1669 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1670 // CHECK2: omp.loop.exit: 1671 // CHECK2-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1672 // CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4 1673 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]]) 1674 // CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 1675 // CHECK2-NEXT: [[TMP35:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 1676 // CHECK2-NEXT: store i8* [[TMP35]], i8** [[TMP34]], align 4 1677 // CHECK2-NEXT: [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1678 // CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4 1679 // CHECK2-NEXT: [[TMP38:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1680 // CHECK2-NEXT: [[TMP39:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP37]], i32 1, i32 4, i8* [[TMP38]], void (i8*, i8*)* @.omp.reduction.reduction_func.5, [8 x i32]* @.gomp_critical_user_.reduction.var) 1681 // CHECK2-NEXT: switch i32 [[TMP39]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1682 // CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1683 // CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1684 // CHECK2-NEXT: ] 1685 // CHECK2: .omp.reduction.case1: 1686 // CHECK2-NEXT: [[TMP40:%.*]] = getelementptr 
i32, i32* [[ARRAYIDX]], i32 3 1687 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP40]] 1688 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE17:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 1689 // CHECK2: omp.arraycpy.body: 1690 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1691 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST13:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT15:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1692 // CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], align 4 1693 // CHECK2-NEXT: [[TMP42:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 1694 // CHECK2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP41]], [[TMP42]] 1695 // CHECK2-NEXT: store i32 [[ADD14]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], align 4 1696 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT15]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], i32 1 1697 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 1698 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE16:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT15]], [[TMP40]] 1699 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE16]], label [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_BODY]] 1700 // CHECK2: omp.arraycpy.done17: 1701 // CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP37]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1702 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1703 // CHECK2: .omp.reduction.case2: 1704 // CHECK2-NEXT: [[TMP43:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3 1705 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY18:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP43]] 1706 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY18]], label [[OMP_ARRAYCPY_DONE25:%.*]], label 
[[OMP_ARRAYCPY_BODY19:%.*]] 1707 // CHECK2: omp.arraycpy.body19: 1708 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST20:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT23:%.*]], [[OMP_ARRAYCPY_BODY19]] ] 1709 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST21:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT22:%.*]], [[OMP_ARRAYCPY_BODY19]] ] 1710 // CHECK2-NEXT: [[TMP44:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], align 4 1711 // CHECK2-NEXT: [[TMP45:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 [[TMP44]] monotonic, align 4 1712 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT22]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 1 1713 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT23]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], i32 1 1714 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE24:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT22]], [[TMP43]] 1715 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE24]], label [[OMP_ARRAYCPY_DONE25]], label [[OMP_ARRAYCPY_BODY19]] 1716 // CHECK2: omp.arraycpy.done25: 1717 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1718 // CHECK2: .omp.reduction.default: 1719 // CHECK2-NEXT: br label [[OMP_PRECOND_END]] 1720 // CHECK2: omp.precond.end: 1721 // CHECK2-NEXT: ret void 1722 // 1723 // 1724 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.5 1725 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 1726 // CHECK2-NEXT: entry: 1727 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 1728 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 1729 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 1730 // CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 1731 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 1732 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1733 // CHECK2-NEXT: [[TMP4:%.*]] = load 
i8*, i8** [[DOTADDR1]], align 4 1734 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1735 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 1736 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 1737 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 1738 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 1739 // CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 1740 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 1741 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 3 1742 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] 1743 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 1744 // CHECK2: omp.arraycpy.body: 1745 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1746 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1747 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 1748 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 1749 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 1750 // CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 1751 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 1752 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 1753 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] 1754 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 
1755 // CHECK2: omp.arraycpy.done2: 1756 // CHECK2-NEXT: ret void 1757 // 1758 // 1759 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6 1760 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 1761 // CHECK2-NEXT: entry: 1762 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 1763 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 1764 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 1765 // CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 1766 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 1767 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1768 // CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 1769 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1770 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 1771 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 1772 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 1773 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 1774 // CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 1775 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 1776 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 3 1777 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] 1778 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 1779 // CHECK2: omp.arraycpy.body: 1780 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1781 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1782 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* 
[[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 1783 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 1784 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 1785 // CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 1786 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 1787 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 1788 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] 1789 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 1790 // CHECK2: omp.arraycpy.done2: 1791 // CHECK2-NEXT: ret void 1792 // 1793 // 1794 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78 1795 // CHECK2-SAME: (i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR6:[0-9]+]] { 1796 // CHECK2-NEXT: entry: 1797 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1798 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 1799 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1800 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1801 // CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 1802 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 1803 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1804 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[SIZE_CASTED]], align 4 1805 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1806 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP2]], [10 x i32]* [[TMP0]]) 1807 // CHECK2-NEXT: ret void 1808 // 1809 // 1810 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9 1811 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR1]] { 1812 // CHECK2-NEXT: entry: 1813 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1814 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1815 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1816 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 1817 // CHECK2-NEXT: [[A2:%.*]] = alloca [2 x i32], align 4 1818 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1819 // CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 1820 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1821 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1822 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1823 // CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 1824 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 1825 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 0 1826 // CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 1 1827 // CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[A2]], i32 0, i32 0 1828 // CHECK2-NEXT: [[TMP1:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 2 1829 // CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP1]] 1830 // CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label 
[[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 1831 // CHECK2: omp.arrayinit.body: 1832 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 1833 // CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 1834 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 1835 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP1]] 1836 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 1837 // CHECK2: omp.arrayinit.done: 1838 // CHECK2-NEXT: [[TMP2:%.*]] = bitcast [10 x i32]* [[TMP0]] to i32* 1839 // CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[TMP2]] to i64 1840 // CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 1841 // CHECK2-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]] 1842 // CHECK2-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 1843 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast [2 x i32]* [[A2]] to i32* 1844 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP7]], i64 [[TMP6]] 1845 // CHECK2-NEXT: [[TMP9:%.*]] = bitcast i32* [[TMP8]] to [10 x i32]* 1846 // CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [2 x i32]* [[A2]] to i32* 1847 // CHECK2-NEXT: store i32 0, i32* [[I]], align 4 1848 // CHECK2-NEXT: br label [[FOR_COND:%.*]] 1849 // CHECK2: for.cond: 1850 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4 1851 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1852 // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP10]], [[TMP11]] 1853 // CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] 1854 // CHECK2: for.body: 1855 // CHECK2-NEXT: br label [[FOR_INC:%.*]] 1856 // CHECK2: for.inc: 1857 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4 1858 // 
CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP12]], 1 1859 // CHECK2-NEXT: store i32 [[INC]], i32* [[I]], align 4 1860 // CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] 1861 // CHECK2: for.end: 1862 // CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 1863 // CHECK2-NEXT: [[TMP14:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 1864 // CHECK2-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 4 1865 // CHECK2-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1866 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4 1867 // CHECK2-NEXT: [[TMP17:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1868 // CHECK2-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32 1, i32 4, i8* [[TMP17]], void (i8*, i8*)* @.omp.reduction.reduction_func.10, [8 x i32]* @.gomp_critical_user_.reduction.var) 1869 // CHECK2-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1870 // CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1871 // CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1872 // CHECK2-NEXT: ] 1873 // CHECK2: .omp.reduction.case1: 1874 // CHECK2-NEXT: [[TMP19:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 2 1875 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP19]] 1876 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 1877 // CHECK2: omp.arraycpy.body: 1878 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1879 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1880 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* 
[[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 1881 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 1882 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] 1883 // CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 1884 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1 1885 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 1886 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP19]] 1887 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] 1888 // CHECK2: omp.arraycpy.done6: 1889 // CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1890 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1891 // CHECK2: .omp.reduction.case2: 1892 // CHECK2-NEXT: [[TMP22:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 2 1893 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP22]] 1894 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]] 1895 // CHECK2: omp.arraycpy.body8: 1896 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 1897 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 1898 // CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4 1899 // CHECK2-NEXT: [[TMP24:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP23]] monotonic, align 4 1900 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr 
i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1 1901 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1 1902 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP22]] 1903 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]] 1904 // CHECK2: omp.arraycpy.done14: 1905 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1906 // CHECK2: .omp.reduction.default: 1907 // CHECK2-NEXT: ret void 1908 // 1909 // 1910 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.10 1911 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 1912 // CHECK2-NEXT: entry: 1913 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 1914 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 1915 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 1916 // CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 1917 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 1918 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1919 // CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 1920 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1921 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 1922 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 1923 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 1924 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 1925 // CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 1926 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 1927 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 2 1928 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] 1929 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], 
label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 1930 // CHECK2: omp.arraycpy.body: 1931 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1932 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1933 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 1934 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 1935 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 1936 // CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 1937 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 1938 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 1939 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] 1940 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 1941 // CHECK2: omp.arraycpy.done2: 1942 // CHECK2-NEXT: ret void 1943 // 1944 // 1945 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81 1946 // CHECK2-SAME: (i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR6]] { 1947 // CHECK2-NEXT: entry: 1948 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1949 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 1950 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1951 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1952 // CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 1953 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 1954 // CHECK2-NEXT: [[TMP1:%.*]] = load 
i32, i32* [[SIZE_ADDR]], align 4 1955 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[SIZE_CASTED]], align 4 1956 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1957 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], [10 x i32]* [[TMP0]]) 1958 // CHECK2-NEXT: ret void 1959 // 1960 // 1961 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13 1962 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR1]] { 1963 // CHECK2-NEXT: entry: 1964 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1965 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1966 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1967 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 1968 // CHECK2-NEXT: [[A1:%.*]] = alloca i32, align 4 1969 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1970 // CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 1971 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1972 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1973 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1974 // CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 1975 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 1976 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 3 1977 // CHECK2-NEXT: store i32 0, i32* [[A1]], align 4 1978 // CHECK2-NEXT: [[TMP1:%.*]] = bitcast [10 x i32]* [[TMP0]] to i32* 1979 // CHECK2-NEXT: [[TMP2:%.*]] = ptrtoint i32* [[TMP1]] to i64 
1980 // CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 1981 // CHECK2-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]] 1982 // CHECK2-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 1983 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr i32, i32* [[A1]], i64 [[TMP5]] 1984 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to [10 x i32]* 1985 // CHECK2-NEXT: store i32 0, i32* [[I]], align 4 1986 // CHECK2-NEXT: br label [[FOR_COND:%.*]] 1987 // CHECK2: for.cond: 1988 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4 1989 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1990 // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]] 1991 // CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] 1992 // CHECK2: for.body: 1993 // CHECK2-NEXT: br label [[FOR_INC:%.*]] 1994 // CHECK2: for.inc: 1995 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4 1996 // CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1 1997 // CHECK2-NEXT: store i32 [[INC]], i32* [[I]], align 4 1998 // CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] 1999 // CHECK2: for.end: 2000 // CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 2001 // CHECK2-NEXT: [[TMP12:%.*]] = bitcast i32* [[A1]] to i8* 2002 // CHECK2-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 4 2003 // CHECK2-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 2004 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4 2005 // CHECK2-NEXT: [[TMP15:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 2006 // CHECK2-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 1, i32 4, i8* [[TMP15]], void (i8*, i8*)* @.omp.reduction.reduction_func.14, [8 x i32]* @.gomp_critical_user_.reduction.var) 2007 // CHECK2-NEXT: switch i32 [[TMP16]], 
label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 2008 // CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 2009 // CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 2010 // CHECK2-NEXT: ] 2011 // CHECK2: .omp.reduction.case1: 2012 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 2013 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[A1]], align 4 2014 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], [[TMP18]] 2015 // CHECK2-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX]], align 4 2016 // CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2017 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2018 // CHECK2: .omp.reduction.case2: 2019 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[A1]], align 4 2020 // CHECK2-NEXT: [[TMP20:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP19]] monotonic, align 4 2021 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2022 // CHECK2: .omp.reduction.default: 2023 // CHECK2-NEXT: ret void 2024 // 2025 // 2026 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.14 2027 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 2028 // CHECK2-NEXT: entry: 2029 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 2030 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 2031 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 2032 // CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 2033 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 2034 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 2035 // CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 2036 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 2037 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 2038 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 2039 // CHECK2-NEXT: 
[[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 2040 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 2041 // CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 2042 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 2043 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 2044 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4 2045 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 2046 // CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 2047 // CHECK2-NEXT: ret void 2048 // 2049 // 2050 // CHECK2-LABEL: define {{[^@]+}}@main 2051 // CHECK2-SAME: () #[[ATTR7:[0-9]+]] { 2052 // CHECK2-NEXT: entry: 2053 // CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 2054 // CHECK2-NEXT: [[SIZE:%.*]] = alloca i32, align 4 2055 // CHECK2-NEXT: [[ARRAY:%.*]] = alloca i32*, align 4 2056 // CHECK2-NEXT: [[RESULT:%.*]] = alloca i32, align 4 2057 // CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4 2058 // CHECK2-NEXT: store i32 100, i32* [[SIZE]], align 4 2059 // CHECK2-NEXT: [[CALL:%.*]] = call noalias noundef nonnull i8* @_Znaj(i32 noundef 400) #[[ATTR10:[0-9]+]] 2060 // CHECK2-NEXT: [[TMP0:%.*]] = bitcast i8* [[CALL]] to i32* 2061 // CHECK2-NEXT: store i32* [[TMP0]], i32** [[ARRAY]], align 4 2062 // CHECK2-NEXT: store i32 0, i32* [[RESULT]], align 4 2063 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[ARRAY]], align 4 2064 // CHECK2-NEXT: call void @_Z3sumPiiS_(i32* noundef [[TMP1]], i32 noundef 100, i32* noundef [[RESULT]]) 2065 // CHECK2-NEXT: ret i32 0 2066 // 2067 // 2068 // CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 2069 // CHECK2-SAME: () #[[ATTR9:[0-9]+]] { 2070 // CHECK2-NEXT: entry: 2071 // CHECK2-NEXT: call void @__tgt_register_requires(i64 1) 2072 // CHECK2-NEXT: ret void 2073 // 2074