// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-cuda-mode -x c++ \
// RUN:   -triple powerpc64le-unknown-unknown -DCUDA \
// RUN:   -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o \
// RUN:   %t-ppc-host.bc

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-cuda-mode -x c++ \
// RUN:   -triple nvptx64-unknown-unknown -DCUDA \
// RUN:   -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s \
// RUN:   -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc \
// RUN:   -o - | FileCheck %s --check-prefix CHECK

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ \
// RUN:   -triple powerpc64le-unknown-unknown -DDIAG \
// RUN:   -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm \
// RUN:   %s -o - | FileCheck %s \
// RUN:   --check-prefix=CHECK1

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ \
// RUN:   -triple i386-unknown-unknown \
// RUN:   -fopenmp-targets=i386-pc-linux-gnu -emit-llvm \
// RUN:   %s -o - | FileCheck %s \
// RUN:   --check-prefix=CHECK2


#if defined(CUDA)
// expected-no-diagnostics

int foo(int n) {
  double *e;
  // no error and no implicit map generated for e[:1]
  #pragma omp target parallel reduction(+: e[:1])
  *e = 10;
  ;
  return 0;
}
// CHECK-NOT: @.offload_maptypes
#elif defined(DIAG)
class S2 {
  mutable int a;
public:
  S2() : a(0) {}
  S2(S2 &s2) : a(s2.a) {}
  S2 &operator+(S2 &s);
};
int bar() {
  S2 o[5];
  // warning "not trivially copyable and not guaranteed to be mapped correctly"
  // is emitted, and an implicit map is generated.
#pragma omp target parallel reduction(+:o[0]) //expected-warning {{Type 'S2' is not trivially copyable and not guaranteed to be mapped correctly}}
  for (int i = 0; i < 10; i++);
  double b[10][10][10];
  // no error and no implicit map generated for the section; the map for b is
  // generated, but not for b[0:2][2:4][1].
#pragma omp target parallel for reduction(task, +: b[0:2][2:4][1])
  for (long long i = 0; i < 10; ++i);
  return 0;
}
// map for variable o:
// map for b:
#else
// expected-no-diagnostics

// An implicit map is generated for array elements or array sections that
// appear in a reduction clause. In the following cases, the implicit map is
// generated for output[0] with map size 4 and for output[:3] with map size
// 12.
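// (On the targets in the RUN lines above, int is 4 bytes, so the single
// element output[0] is mapped with size sizeof(int) == 4 and the section
// output[:3] with size 3 * sizeof(int) == 12; by the same arithmetic, the
// section a[:2] below would cover 2 * sizeof(int) == 8 bytes.)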
void sum(int* input, int size, int* output)
{
#pragma omp target teams distribute parallel for reduction(+: output[0]) \
                                                 map(to: input [0:size])
  for (int i = 0; i < size; i++)
    output[0] += input[i];
#pragma omp target teams distribute parallel for reduction(+: output[:3]) \
                                                 map(to: input [0:size])
  for (int i = 0; i < size; i++)
    output[0] += input[i];
  int a[10];
#pragma omp target parallel reduction(+: a[:2])
  for (int i = 0; i < size; i++)
    ;
#pragma omp target parallel reduction(+: a[3])
  for (int i = 0; i < size; i++)
    ;
}
#endif
int main()
{
#if defined(CUDA)
  int a = foo(10);
#elif defined(DIAG)
  int a = bar();
#else
  const int size = 100;
  int *array = new int[size];
  int result = 0;
  sum(array, size, &result);
#endif
  return 0;
}
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l32
// CHECK-SAME: (double* noundef [[E:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[E_ADDR:%.*]] = alloca double*, align 8
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8
// CHECK-NEXT: store double* [[E]], double** [[E_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 2, i1 false, i1 true)
// CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
// CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK: user_code.entry:
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]])
// CHECK-NEXT: [[TMP2:%.*]] = load double*, double** [[E_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK-NEXT: [[TMP4:%.*]] = bitcast double* [[TMP2]] to i8*
// CHECK-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8**
// CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, double*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP5]], i64 1)
// CHECK-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true)
// CHECK-NEXT: ret void
// CHECK: worker.exit:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__
// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], double* noundef [[E:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[E_ADDR:%.*]] = alloca double*, align 8
// CHECK-NEXT: [[E2:%.*]] = alloca double, align 8
// CHECK-NEXT: [[TMP:%.*]] = alloca double*, align 8
// CHECK-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store double* [[E]], double** [[E_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double*, double** [[E_ADDR]], align 8
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP0]], i64 0
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[E_ADDR]], align 8
// CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[TMP1]], i64 0
// CHECK-NEXT: store double 0.000000e+00, double* [[E2]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load double*, double** [[E_ADDR]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = ptrtoint double* [[TMP2]] to i64
// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint double* [[ARRAYIDX]] to i64
// CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]]
// CHECK-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr double, double* [[E2]], i64 [[TMP6]]
// CHECK-NEXT: store double* [[TMP7]], double** [[TMP]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load double*, double** [[TMP]], align 8
// CHECK-NEXT: store double 1.000000e+01, double* [[TMP8]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK-NEXT: [[TMP12:%.*]] = bitcast double* [[E2]] to i8*
// CHECK-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(%struct.ident_t* @[[GLOB2]], i32 [[TMP10]], i32 1, i64 8, i8* [[TMP13]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func)
// CHECK-NEXT: [[TMP15:%.*]] = icmp eq i32 [[TMP14]], 1
// CHECK-NEXT: br i1 [[TMP15]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK: .omp.reduction.then:
// CHECK-NEXT: [[TMP16:%.*]] = load double, double* [[ARRAYIDX]], align 8
// CHECK-NEXT: [[TMP17:%.*]] = load double, double* [[E2]], align 8
// CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP16]], [[TMP17]]
// CHECK-NEXT: store double [[ADD]], double* [[ARRAYIDX]], align 8
// CHECK-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP10]])
// CHECK-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK: .omp.reduction.done:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
// CHECK-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
// CHECK-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2
// CHECK-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2
// CHECK-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2
// CHECK-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2
// CHECK-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2
// CHECK-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to double**
// CHECK-NEXT: [[TMP11:%.*]] = load double*, double** [[TMP10]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr double, double* [[TMP11]], i64 1
// CHECK-NEXT: [[TMP14:%.*]] = bitcast double* [[TMP13]] to i8*
// CHECK-NEXT: [[TMP15:%.*]] = bitcast double* [[TMP11]] to i64*
// CHECK-NEXT: [[TMP16:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i64*
// CHECK-NEXT: [[TMP17:%.*]] = load i64, i64* [[TMP15]], align 8
// CHECK-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-NEXT: [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16
// CHECK-NEXT: [[TMP20:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP17]], i16 [[TMP7]], i16 [[TMP19]])
// CHECK-NEXT: store i64 [[TMP20]], i64* [[TMP16]], align 8
// CHECK-NEXT: [[TMP21:%.*]] = getelementptr i64, i64* [[TMP15]], i64 1
// CHECK-NEXT: [[TMP22:%.*]] = getelementptr i64, i64* [[TMP16]], i64 1
// CHECK-NEXT: [[TMP23:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i8*
// CHECK-NEXT: store i8* [[TMP23]], i8** [[TMP12]], align 8
// CHECK-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP8]], 0
// CHECK-NEXT: [[TMP25:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK-NEXT: [[TMP26:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]]
// CHECK-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
// CHECK-NEXT: [[TMP28:%.*]] = icmp eq i16 [[TMP8]], 2
// CHECK-NEXT: [[TMP29:%.*]] = and i16 [[TMP6]], 1
// CHECK-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP29]], 0
// CHECK-NEXT: [[TMP31:%.*]] = and i1 [[TMP28]], [[TMP30]]
// CHECK-NEXT: [[TMP32:%.*]] = icmp sgt i16 [[TMP7]], 0
// CHECK-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
// CHECK-NEXT: [[TMP34:%.*]] = or i1 [[TMP24]], [[TMP27]]
// CHECK-NEXT: [[TMP35:%.*]] = or i1 [[TMP34]], [[TMP33]]
// CHECK-NEXT: br i1 [[TMP35]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK: then:
// CHECK-NEXT: [[TMP36:%.*]] = bitcast [1 x i8*]* [[TMP5]] to i8*
// CHECK-NEXT: [[TMP37:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8*
// CHECK-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP36]], i8* [[TMP37]]) #[[ATTR3:[0-9]+]]
// CHECK-NEXT: br label [[IFCONT:%.*]]
// CHECK: else:
// CHECK-NEXT: br label [[IFCONT]]
// CHECK: ifcont:
// CHECK-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1
// CHECK-NEXT: [[TMP39:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]]
// CHECK-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]]
// CHECK-NEXT: br i1 [[TMP40]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
// CHECK: then4:
// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to double**
// CHECK-NEXT: [[TMP43:%.*]] = load double*, double** [[TMP42]], align 8
// CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to double**
// CHECK-NEXT: [[TMP46:%.*]] = load double*, double** [[TMP45]], align 8
// CHECK-NEXT: [[TMP47:%.*]] = load double, double* [[TMP43]], align 8
// CHECK-NEXT: store double [[TMP47]], double* [[TMP46]], align 8
// CHECK-NEXT: br label [[IFCONT6:%.*]]
// CHECK: else5:
// CHECK-NEXT: br label [[IFCONT6]]
// CHECK: ifcont6:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
// CHECK-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]])
// CHECK-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [1 x i8*]*
// CHECK-NEXT: store i32 0, i32* [[DOTCNT_ADDR]], align 4
// CHECK-NEXT: br label [[PRECOND:%.*]]
// CHECK: precond:
// CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCNT_ADDR]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 2
// CHECK-NEXT: br i1 [[TMP9]], label [[BODY:%.*]], label [[EXIT:%.*]]
// CHECK: body:
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]])
// CHECK-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK: then:
// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP10]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32*
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP12]], i32 [[TMP8]]
// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK-NEXT: store volatile i32 [[TMP15]], i32 addrspace(3)* [[TMP14]], align 4
// CHECK-NEXT: br label [[IFCONT:%.*]]
// CHECK: else:
// CHECK-NEXT: br label [[IFCONT]]
// CHECK: ifcont:
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]])
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTADDR1]], align 4
// CHECK-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP16]]
// CHECK-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK: then2:
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i64 0, i64 0
// CHECK-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8
// CHECK-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32*
// CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP20]], i32 [[TMP8]]
// CHECK-NEXT: [[TMP22:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP17]], align 4
// CHECK-NEXT: store i32 [[TMP22]], i32* [[TMP21]], align 4
// CHECK-NEXT: br label [[IFCONT4:%.*]]
// CHECK: else3:
// CHECK-NEXT: br label [[IFCONT4]]
// CHECK: ifcont4:
// CHECK-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK-NEXT: store i32 [[TMP23]], i32* [[DOTCNT_ADDR]], align 4
// CHECK-NEXT: br label [[PRECOND]]
// CHECK: exit:
// CHECK-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z3barv
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[O:%.*]] = alloca [5 x %class.S2], align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[B:%.*]] = alloca [10 x [10 x [10 x double]]], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[O]], i32 0, i32 0
// CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[ARRAY_BEGIN]], i64 5
// CHECK1-NEXT: br label [[ARRAYCTOR_LOOP:%.*]]
// CHECK1: arrayctor.loop:
// CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %class.S2* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ]
// CHECK1-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]])
// CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S2]], %class.S2* [[ARRAYCTOR_CUR]], i64 1
// CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S2* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]]
// CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]]
// CHECK1: arrayctor.cont:
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[O]], i64 0, i64 0
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [5 x %class.S2]**
// CHECK1-NEXT: store [5 x %class.S2]* [[O]], [5 x %class.S2]** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to %class.S2**
// CHECK1-NEXT: store %class.S2* [[ARRAYIDX]], %class.S2** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP7]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 1, i32* [[TMP8]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50([5 x %class.S2]* [[O]]) #[[ATTR8:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to [10 x [10 x [10 x double]]]**
// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to [10 x [10 x [10 x double]]]**
// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP25]], align 4
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1
// CHECK1-NEXT: store i32 1, i32* [[TMP26]], align 4
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP23]], i8*** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP24]], i8*** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.3, i32 0, i32 0), i64** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i64** [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP33]], align 8
// CHECK1-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB2]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]])
// CHECK1-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0
// CHECK1-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK1: omp_offload.failed5:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55([10 x [10 x [10 x double]]]* [[B]]) #[[ATTR8]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK1: omp_offload.cont6:
// CHECK1-NEXT: ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN2S2C1Ev
// CHECK1-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8
// CHECK1-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8
// CHECK1-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8
// CHECK1-NEXT: call void @_ZN2S2C2Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50
// CHECK1-SAME: ([5 x %class.S2]* noundef nonnull align 4 dereferenceable(20) [[O:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[O_ADDR:%.*]] = alloca [5 x %class.S2]*, align 8
// CHECK1-NEXT: store [5 x %class.S2]* [[O]], [5 x %class.S2]** [[O_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [5 x %class.S2]*, [5 x %class.S2]** [[O_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [5 x %class.S2]*)* @.omp_outlined. to void (i32*, i32*, ...)*), [5 x %class.S2]* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [5 x %class.S2]* noundef nonnull align 4 dereferenceable(20) [[O:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[O_ADDR:%.*]] = alloca [5 x %class.S2]*, align 8
// CHECK1-NEXT: [[O1:%.*]] = alloca [[CLASS_S2:%.*]], align 4
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store [5 x %class.S2]* [[O]], [5 x %class.S2]** [[O_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [5 x %class.S2]*, [5 x %class.S2]** [[O_ADDR]], align 8
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[TMP0]], i64 0, i64 0
// CHECK1-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]])
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast [5 x %class.S2]* [[TMP0]] to %class.S2*
// CHECK1-NEXT: [[TMP2:%.*]] = ptrtoint %class.S2* [[TMP1]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = ptrtoint %class.S2* [[ARRAYIDX]] to i64
// CHECK1-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]]
// CHECK1-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (%class.S2* getelementptr ([[CLASS_S2]], %class.S2* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr [[CLASS_S2]], %class.S2* [[O1]], i64 [[TMP5]]
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast %class.S2* [[TMP6]] to [5 x %class.S2]*
// CHECK1-NEXT: store i32 0, i32* [[I]], align 4
// CHECK1-NEXT: br label [[FOR_COND:%.*]]
// CHECK1: for.cond:
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], 10
// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK1: for.body:
// CHECK1-NEXT: br label [[FOR_INC:%.*]]
// CHECK1: for.inc:
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4
// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK1-NEXT: store i32 [[INC]], i32* [[I]], align 4
// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]]
// CHECK1: for.end:
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %class.S2* [[O1]] to i8*
// CHECK1-NEXT: store i8* [[TMP11]], i8** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP13]], i32 1, i64 8, i8* [[TMP14]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYIDX]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]])
// CHECK1-NEXT: [[TMP16:%.*]] = bitcast %class.S2* [[ARRAYIDX]] to i8*
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast %class.S2* [[CALL]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false)
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP13]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4
// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP19]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: [[CALL2:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYIDX]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]])
// CHECK1-NEXT: [[TMP20:%.*]] = bitcast %class.S2* [[ARRAYIDX]] to i8*
// CHECK1-NEXT: [[TMP21:%.*]] = bitcast %class.S2* [[CALL2]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP20]], i8* align 4 [[TMP21]], i64 4, i1 false)
// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP19]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %class.S2*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %class.S2*
// CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull align 4 dereferenceable(4) [[TMP11]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[TMP8]])
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast %class.S2* [[TMP11]] to i8*
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast %class.S2* [[CALL]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i64 4, i1 false)
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55
// CHECK1-SAME: ([10 x [10 x [10 x double]]]* noundef nonnull align 8 dereferenceable(8000) [[B:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x [10 x [10 x double]]]*, align 8
// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x [10 x [10 x double]]]*, [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x [10 x [10 x double]]]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), [10 x [10 x [10 x double]]]* [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x [10 x [10 x double]]]* noundef nonnull align 8 dereferenceable(8000) [[B:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x [10 x [10 x double]]]*, align 8
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [1 x %struct.kmp_taskred_input_t], align 8
// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x [10 x [10 x double]]]*, [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8
// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 9, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8
// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY]], i64 2
// CHECK1-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX1]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY2]], i64 1
// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 1
// CHECK1-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX4]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY5]], i64 5
// CHECK1-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX6]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY7]], i64 1
// CHECK1-NEXT: [[TMP1:%.*]] = ptrtoint double* [[ARRAYIDX8]] to i64
// CHECK1-NEXT: [[TMP2:%.*]] = ptrtoint double* [[ARRAYIDX3]] to i64
// CHECK1-NEXT: [[TMP3:%.*]] = sub i64 [[TMP1]], [[TMP2]]
// CHECK1-NEXT: [[TMP4:%.*]] = sdiv exact i64 [[TMP3]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP5:%.*]] = add nuw i64 [[TMP4]], 1
// CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP7:%.*]] = call i8* @llvm.stacksave()
// CHECK1-NEXT: store i8* [[TMP7]], i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: [[VLA:%.*]] = alloca double, i64 [[TMP5]], align 8
// CHECK1-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR0]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr double, double* [[VLA]], i64 [[TMP5]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq double* [[VLA]], [[TMP8]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store double 0.000000e+00, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast [10 x [10 x [10 x double]]]* [[TMP0]] to double*
// CHECK1-NEXT: [[TMP10:%.*]] = ptrtoint double* [[TMP9]] to i64
// CHECK1-NEXT: [[TMP11:%.*]] = ptrtoint double* [[ARRAYIDX3]] to i64
// CHECK1-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr double, double* [[VLA]], i64 [[TMP13]]
// CHECK1-NEXT: [[TMP15:%.*]] = bitcast double* [[TMP14]] to [10 x [10 x [10 x double]]]*
// CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [1 x %struct.kmp_taskred_input_t], [1 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0
// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYDECAY10:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX9]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY10]], i64 2
// CHECK1-NEXT: [[ARRAYDECAY12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY12]], i64 1
// CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 1
// CHECK1-NEXT: [[ARRAYDECAY15:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX14]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY15]], i64 5
// CHECK1-NEXT: [[ARRAYDECAY17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 0
// CHECK1-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY17]], i64 1
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast double* [[VLA]] to i8*
// CHECK1-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast double* [[ARRAYIDX13]] to i8*
// CHECK1-NEXT: store i8* [[TMP19]], i8** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = ptrtoint double* [[ARRAYIDX18]] to i64
// CHECK1-NEXT: [[TMP21:%.*]] = ptrtoint double* [[ARRAYIDX13]] to i64
// CHECK1-NEXT: [[TMP22:%.*]] = sub i64 [[TMP20]], [[TMP21]]
// CHECK1-NEXT: [[TMP23:%.*]] = sdiv exact i64 [[TMP22]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP24:%.*]] = add nuw i64 [[TMP23]], 1
// CHECK1-NEXT: [[TMP25:%.*]] = mul nuw i64 [[TMP24]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64)
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2
// CHECK1-NEXT: store i64 [[TMP25]], i64* [[TMP26]], align 8
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP27]], align 8
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4
// CHECK1-NEXT: store i8* null, i8** [[TMP28]], align 8
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5
// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. to i8*), i8** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6
// CHECK1-NEXT: store i32 1, i32* [[TMP30]], align 8
// CHECK1-NEXT: [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
// CHECK1-NEXT: [[TMP33:%.*]] = bitcast [1 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8*
// CHECK1-NEXT: [[TMP34:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB2]], i32 [[TMP32]], i32 1, i32 1, i8* [[TMP33]])
// CHECK1-NEXT: store i8* [[TMP34]], i8** [[DOTTASK_RED_]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP36]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK1-NEXT: [[TMP37:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP37]], 9
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1: cond.true:
// CHECK1-NEXT: br label [[COND_END:%.*]]
// CHECK1: cond.false:
// CHECK1-NEXT: [[TMP38:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: br label [[COND_END]]
// CHECK1: cond.end:
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP38]], [[COND_FALSE]] ]
// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[TMP39:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
// CHECK1-NEXT: store i64 [[TMP39]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1: omp.inner.for.cond:
// CHECK1-NEXT: [[TMP40:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[TMP41:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8
// CHECK1-NEXT: [[CMP19:%.*]] = icmp sle i64 [[TMP40]], [[TMP41]]
// CHECK1-NEXT: br i1 [[CMP19]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// CHECK1: omp.inner.for.cond.cleanup:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// CHECK1: omp.inner.for.body:
// CHECK1-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP42]], 1
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]]
// CHECK1-NEXT: store i64 [[ADD]], i64* [[I]], align 8
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1: omp.body.continue:
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1: omp.inner.for.inc:
// CHECK1-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP43]], 1
// CHECK1-NEXT: store i64 [[ADD20]], i64* [[DOTOMP_IV]], align 8
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK1: omp.inner.for.end:
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1: omp.loop.exit:
// CHECK1-NEXT: [[TMP44:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP45:%.*]] = load i32, i32* [[TMP44]], align 4
// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP45]])
// CHECK1-NEXT: [[TMP46:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[TMP46]], align 4
// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP47]], i32 1)
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP49:%.*]] = bitcast double* [[VLA]] to i8*
// CHECK1-NEXT: store i8* [[TMP49]], i8** [[TMP48]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP51:%.*]] = inttoptr i64 [[TMP5]] to i8*
// CHECK1-NEXT: store i8* [[TMP51]], i8** [[TMP50]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = load i32, i32* [[TMP52]], align 4
// CHECK1-NEXT: [[TMP54:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP55:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP53]], i32 1, i64 16, i8* [[TMP54]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP55]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr double, double* [[ARRAYIDX3]], i64 [[TMP5]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[ARRAYIDX3]], [[TMP56]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE25:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST21:%.*]] = phi double* [ [[ARRAYIDX3]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT23:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP57:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], align 8
// CHECK1-NEXT: [[TMP58:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8
// CHECK1-NEXT: [[ADD22:%.*]] = fadd double [[TMP57]], [[TMP58]]
// CHECK1-NEXT: store double [[ADD22]], double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], align 8
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT23]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE24:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT23]], [[TMP56]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE24]], label [[OMP_ARRAYCPY_DONE25]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done25:
// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP53]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr double, double* [[ARRAYIDX3]], i64 [[TMP5]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY26:%.*]] = icmp eq double* [[ARRAYIDX3]], [[TMP59]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY26]], label [[OMP_ARRAYCPY_DONE33:%.*]], label [[OMP_ARRAYCPY_BODY27:%.*]]
// CHECK1: omp.arraycpy.body27:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST28:%.*]] = phi double* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT31:%.*]], [[OMP_ARRAYCPY_BODY27]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST29:%.*]] = phi double* [ [[ARRAYIDX3]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT30:%.*]], [[OMP_ARRAYCPY_BODY27]] ]
// CHECK1-NEXT: [[TMP60:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST28]], align 8
// CHECK1-NEXT: [[TMP61:%.*]] = atomicrmw fadd double* [[OMP_ARRAYCPY_DESTELEMENTPAST29]], double [[TMP60]] monotonic, align 8
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT30]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST29]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT31]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST28]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE32:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT30]], [[TMP59]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE32]], label [[OMP_ARRAYCPY_DONE33]], label [[OMP_ARRAYCPY_BODY27]]
// CHECK1: omp.arraycpy.done33:
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: [[TMP62:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8
// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP62]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.red_init.
// CHECK1-SAME: (i8* noalias noundef [[TMP0:%.*]], i8* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast i8** [[DOTADDR]] to double**
// CHECK1-NEXT: [[TMP3:%.*]] = load double*, double** [[TMP2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* @{{reduction_size[.].+[.]}}, align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr double, double* [[TMP3]], i64 [[TMP4]]
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq double* [[TMP3]], [[TMP5]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store double 0.000000e+00, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP5]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.red_comb.
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* @{{reduction_size[.].+[.]}}, align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[DOTADDR]] to double**
// CHECK1-NEXT: [[TMP4:%.*]] = load double*, double** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8** [[DOTADDR1]] to double**
// CHECK1-NEXT: [[TMP6:%.*]] = load double*, double** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr double, double* [[TMP4]], i64 [[TMP2]]
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[TMP4]], [[TMP7]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[TMP6]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP8:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8
// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[TMP8]], [[TMP9]]
// CHECK1-NEXT: store double [[ADD]], double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
[[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 772 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP7]] 773 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 774 // CHECK1: omp.arraycpy.done2: 775 // CHECK1-NEXT: ret void 776 // 777 // 778 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2 779 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4]] { 780 // CHECK1-NEXT: entry: 781 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 782 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 783 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 784 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 785 // CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 786 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]* 787 // CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 788 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]* 789 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0 790 // CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 791 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double* 792 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0 793 // CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 794 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double* 795 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1 796 // CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8 797 // CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint i8* [[TMP13]] to i64 798 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr double, double* [[TMP11]], i64 [[TMP14]] 799 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[TMP11]], [[TMP15]] 800 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 801 // CHECK1: omp.arraycpy.body: 802 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 803 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 804 // CHECK1-NEXT: [[TMP16:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 805 // CHECK1-NEXT: [[TMP17:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8 806 // CHECK1-NEXT: [[ADD:%.*]] = fadd double [[TMP16]], [[TMP17]] 807 // CHECK1-NEXT: store double [[ADD]], double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 808 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 809 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 810 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP15]] 811 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 812 // CHECK1: omp.arraycpy.done2: 813 // CHECK1-NEXT: ret void 814 // 815 // 816 // CHECK1-LABEL: define {{[^@]+}}@_ZN2S2C2Ev 817 // CHECK1-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 { 818 // 
CHECK1-NEXT: entry: 819 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8 820 // CHECK1-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8 821 // CHECK1-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8 822 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[THIS1]], i32 0, i32 0 823 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 824 // CHECK1-NEXT: ret void 825 // 826 // 827 // CHECK1-LABEL: define {{[^@]+}}@main 828 // CHECK1-SAME: () #[[ATTR10:[0-9]+]] { 829 // CHECK1-NEXT: entry: 830 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 831 // CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 832 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4 833 // CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3barv() 834 // CHECK1-NEXT: store i32 [[CALL]], i32* [[A]], align 4 835 // CHECK1-NEXT: ret i32 0 836 // 837 // 838 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 839 // CHECK1-SAME: () #[[ATTR11:[0-9]+]] { 840 // CHECK1-NEXT: entry: 841 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1) 842 // CHECK1-NEXT: ret void 843 // 844 // 845 // CHECK2-LABEL: define {{[^@]+}}@_Z3sumPiiS_ 846 // CHECK2-SAME: (i32* noundef [[INPUT:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]]) #[[ATTR0:[0-9]+]] { 847 // CHECK2-NEXT: entry: 848 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 849 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 850 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 851 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 852 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 853 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 854 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 855 // CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 856 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 857 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 858 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 859 // CHECK2-NEXT: [[SIZE_CASTED4:%.*]] = alloca i32, align 4 860 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [3 x i8*], align 4 861 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [3 x i8*], align 4 862 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [3 x i8*], align 4 863 // CHECK2-NEXT: [[DOTOFFLOAD_SIZES10:%.*]] = alloca [3 x i64], align 4 864 // CHECK2-NEXT: [[_TMP11:%.*]] = alloca i32, align 4 865 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4 866 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_13:%.*]] = alloca i32, align 4 867 // CHECK2-NEXT: [[A:%.*]] = alloca [10 x i32], align 4 868 // CHECK2-NEXT: [[SIZE_CASTED21:%.*]] = alloca i32, align 4 869 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [2 x i8*], align 4 870 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [2 x i8*], align 4 871 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [2 x i8*], align 4 872 // CHECK2-NEXT: [[SIZE_CASTED29:%.*]] = alloca i32, align 4 873 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS31:%.*]] = alloca [2 x i8*], align 4 874 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS32:%.*]] = alloca [2 x i8*], align 4 875 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS33:%.*]] = alloca [2 x i8*], align 4 876 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 877 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 878 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 879 // CHECK2-NEXT: [[TMP0:%.*]] = load 
i32, i32* [[SIZE_ADDR]], align 4 880 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4 881 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 882 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 883 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 884 // CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 885 // CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 886 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i32 0 887 // CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 888 // CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 889 // CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0 890 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 891 // CHECK2-NEXT: [[TMP9:%.*]] = mul nuw i32 [[TMP8]], 4 892 // CHECK2-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64 893 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES]] to i8* 894 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP11]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes to i8*), i32 24, i1 false) 895 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 896 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* 897 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[TMP13]], align 4 898 // CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 899 // CHECK2-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* 900 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[TMP15]], align 4 901 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 902 // CHECK2-NEXT: store i8* null, i8** [[TMP16]], align 4 903 // CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 904 // CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32** 905 // CHECK2-NEXT: store i32* [[TMP4]], i32** [[TMP18]], align 4 906 // CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 907 // CHECK2-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32** 908 // CHECK2-NEXT: store i32* [[ARRAYIDX]], i32** [[TMP20]], align 4 909 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 910 // CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 4 911 // CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 912 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32** 913 // CHECK2-NEXT: store i32* [[TMP6]], i32** [[TMP23]], align 4 914 // CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 915 // CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32** 916 // CHECK2-NEXT: store i32* [[ARRAYIDX1]], i32** [[TMP25]], align 4 917 // CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 918 // CHECK2-NEXT: store i64 [[TMP10]], i64* [[TMP26]], align 4 919 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 920 // CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 4 921 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 922 // CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 923 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 924 // CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 925 // CHECK2-NEXT: store i32 [[TMP31]], i32* [[DOTCAPTURE_EXPR_]], align 4 926 // CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 927 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP32]], 0 928 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 929 // CHECK2-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 930 // CHECK2-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 931 // CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 932 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP33]], 1 933 // CHECK2-NEXT: [[TMP34:%.*]] = zext i32 [[ADD]] to i64 934 // CHECK2-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 935 // CHECK2-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 936 // CHECK2-NEXT: store i32 1, i32* [[TMP35]], align 4 937 // CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 938 // CHECK2-NEXT: store i32 3, i32* [[TMP36]], align 4 939 // CHECK2-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 940 // CHECK2-NEXT: store i8** [[TMP28]], i8*** [[TMP37]], align 4 941 // CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 942 // CHECK2-NEXT: store i8** [[TMP29]], i8*** [[TMP38]], align 4 943 // CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 944 // CHECK2-NEXT: store i64* [[TMP30]], i64** [[TMP39]], align 4 945 // CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 946 // CHECK2-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP40]], align 4 947 // CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 948 // CHECK2-NEXT: store i8** null, i8*** [[TMP41]], align 4 949 // CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 950 // CHECK2-NEXT: store i8** null, i8*** [[TMP42]], align 4 951 // CHECK2-NEXT: [[TMP43:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 952 // CHECK2-NEXT: store i64 [[TMP34]], i64* [[TMP43]], align 8 953 // CHECK2-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 954 // CHECK2-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 955 // CHECK2-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 956 // CHECK2: omp_offload.failed: 957 // CHECK2-NEXT: 
call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69(i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]]) #[[ATTR2:[0-9]+]] 958 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] 959 // CHECK2: omp_offload.cont: 960 // CHECK2-NEXT: [[TMP46:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 961 // CHECK2-NEXT: store i32 [[TMP46]], i32* [[SIZE_CASTED4]], align 4 962 // CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[SIZE_CASTED4]], align 4 963 // CHECK2-NEXT: [[TMP48:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 964 // CHECK2-NEXT: [[TMP49:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 965 // CHECK2-NEXT: [[TMP50:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 966 // CHECK2-NEXT: [[TMP51:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 967 // CHECK2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP51]], i32 0 968 // CHECK2-NEXT: [[TMP52:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 969 // CHECK2-NEXT: [[TMP53:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 970 // CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP53]], i32 0 971 // CHECK2-NEXT: [[TMP54:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 972 // CHECK2-NEXT: [[TMP55:%.*]] = mul nuw i32 [[TMP54]], 4 973 // CHECK2-NEXT: [[TMP56:%.*]] = sext i32 [[TMP55]] to i64 974 // CHECK2-NEXT: [[TMP57:%.*]] = bitcast [3 x i64]* [[DOTOFFLOAD_SIZES10]] to i8* 975 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP57]], i8* align 4 bitcast ([3 x i64]* @.offload_sizes.7 to i8*), i32 24, i1 false) 976 // CHECK2-NEXT: [[TMP58:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 977 // CHECK2-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to i32* 978 // CHECK2-NEXT: store i32 [[TMP47]], i32* [[TMP59]], align 4 979 // CHECK2-NEXT: [[TMP60:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 980 // CHECK2-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i32* 981 // CHECK2-NEXT: store i32 [[TMP47]], i32* [[TMP61]], align 4 982 // CHECK2-NEXT: [[TMP62:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0 983 // CHECK2-NEXT: store i8* null, i8** [[TMP62]], align 4 984 // CHECK2-NEXT: [[TMP63:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1 985 // CHECK2-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32** 986 // CHECK2-NEXT: store i32* [[TMP50]], i32** [[TMP64]], align 4 987 // CHECK2-NEXT: [[TMP65:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1 988 // CHECK2-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32** 989 // CHECK2-NEXT: store i32* [[ARRAYIDX5]], i32** [[TMP66]], align 4 990 // CHECK2-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1 991 // CHECK2-NEXT: store i8* null, i8** [[TMP67]], align 4 992 // CHECK2-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 2 993 // CHECK2-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32** 994 // CHECK2-NEXT: store i32* [[TMP52]], i32** [[TMP69]], align 4 995 // CHECK2-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 2 996 // CHECK2-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32** 997 // CHECK2-NEXT: store i32* [[ARRAYIDX6]], i32** [[TMP71]], align 4 998 // CHECK2-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES10]], i32 0, i32 2 999 // CHECK2-NEXT: store i64 
[[TMP56]], i64* [[TMP72]], align 4 1000 // CHECK2-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 2 1001 // CHECK2-NEXT: store i8* null, i8** [[TMP73]], align 4 1002 // CHECK2-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 1003 // CHECK2-NEXT: [[TMP75:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 1004 // CHECK2-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES10]], i32 0, i32 0 1005 // CHECK2-NEXT: [[TMP77:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1006 // CHECK2-NEXT: store i32 [[TMP77]], i32* [[DOTCAPTURE_EXPR_12]], align 4 1007 // CHECK2-NEXT: [[TMP78:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4 1008 // CHECK2-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP78]], 0 1009 // CHECK2-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1 1010 // CHECK2-NEXT: [[SUB16:%.*]] = sub nsw i32 [[DIV15]], 1 1011 // CHECK2-NEXT: store i32 [[SUB16]], i32* [[DOTCAPTURE_EXPR_13]], align 4 1012 // CHECK2-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_13]], align 4 1013 // CHECK2-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP79]], 1 1014 // CHECK2-NEXT: [[TMP80:%.*]] = zext i32 [[ADD17]] to i64 1015 // CHECK2-NEXT: [[KERNEL_ARGS18:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 1016 // CHECK2-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 0 1017 // CHECK2-NEXT: store i32 1, i32* [[TMP81]], align 4 1018 // CHECK2-NEXT: [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 1 1019 // CHECK2-NEXT: store i32 3, i32* [[TMP82]], align 4 1020 // CHECK2-NEXT: [[TMP83:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 2 1021 // CHECK2-NEXT: store i8** [[TMP74]], i8*** [[TMP83]], align 4 1022 // CHECK2-NEXT: [[TMP84:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 3 1023 // CHECK2-NEXT: store i8** [[TMP75]], i8*** [[TMP84]], align 4 1024 // CHECK2-NEXT: [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 4 1025 // CHECK2-NEXT: store i64* [[TMP76]], i64** [[TMP85]], align 4 1026 // CHECK2-NEXT: [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 5 1027 // CHECK2-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.8, i32 0, i32 0), i64** [[TMP86]], align 4 1028 // CHECK2-NEXT: [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 6 1029 // CHECK2-NEXT: store i8** null, i8*** [[TMP87]], align 4 1030 // CHECK2-NEXT: [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 7 1031 // CHECK2-NEXT: store i8** null, i8*** [[TMP88]], align 4 1032 // CHECK2-NEXT: [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]], i32 0, i32 8 1033 // CHECK2-NEXT: store i64 [[TMP80]], i64* [[TMP89]], align 8 1034 // CHECK2-NEXT: [[TMP90:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* 
@[[GLOB4]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS18]]) 1035 // CHECK2-NEXT: [[TMP91:%.*]] = icmp ne i32 [[TMP90]], 0 1036 // CHECK2-NEXT: br i1 [[TMP91]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]] 1037 // CHECK2: omp_offload.failed19: 1038 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73(i32 [[TMP47]], i32* [[TMP48]], i32* [[TMP49]]) #[[ATTR2]] 1039 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT20]] 1040 // CHECK2: omp_offload.cont20: 1041 // CHECK2-NEXT: [[TMP92:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1042 // CHECK2-NEXT: store i32 [[TMP92]], i32* [[SIZE_CASTED21]], align 4 1043 // CHECK2-NEXT: [[TMP93:%.*]] = load i32, i32* [[SIZE_CASTED21]], align 4 1044 // CHECK2-NEXT: [[ARRAYIDX22:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 0 1045 // CHECK2-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0 1046 // CHECK2-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i32* 1047 // CHECK2-NEXT: store i32 [[TMP93]], i32* [[TMP95]], align 4 1048 // CHECK2-NEXT: [[TMP96:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 1049 // CHECK2-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* 1050 // CHECK2-NEXT: store i32 [[TMP93]], i32* [[TMP97]], align 4 1051 // CHECK2-NEXT: [[TMP98:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0 1052 // CHECK2-NEXT: store i8* null, i8** [[TMP98]], align 4 1053 // CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1 1054 // CHECK2-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to [10 x i32]** 1055 // CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP100]], align 4 1056 // CHECK2-NEXT: [[TMP101:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 1 1057 // CHECK2-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32** 1058 // CHECK2-NEXT: store i32* [[ARRAYIDX22]], i32** [[TMP102]], align 4 1059 // CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1 1060 // CHECK2-NEXT: store i8* null, i8** [[TMP103]], align 4 1061 // CHECK2-NEXT: [[TMP104:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0 1062 // CHECK2-NEXT: [[TMP105:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 1063 // CHECK2-NEXT: [[KERNEL_ARGS26:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 1064 // CHECK2-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS26]], i32 0, i32 0 1065 // CHECK2-NEXT: store i32 1, i32* [[TMP106]], align 4 1066 // CHECK2-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS26]], i32 0, i32 1 1067 // CHECK2-NEXT: store i32 2, i32* [[TMP107]], align 4 1068 // CHECK2-NEXT: [[TMP108:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS26]], i32 0, i32 2 1069 // CHECK2-NEXT: store i8** [[TMP104]], i8*** [[TMP108]], align 4 1070 // CHECK2-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS26]], i32 0, i32 3 1071 // CHECK2-NEXT: store i8** 
[[TMP105]], i8*** [[TMP109]], align 4 1072 // CHECK2-NEXT: [[TMP110:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS26]], i32 0, i32 4 1073 // CHECK2-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.11, i32 0, i32 0), i64** [[TMP110]], align 4 1074 // CHECK2-NEXT: [[TMP111:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS26]], i32 0, i32 5 1075 // CHECK2-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP111]], align 4 1076 // CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS26]], i32 0, i32 6 1077 // CHECK2-NEXT: store i8** null, i8*** [[TMP112]], align 4 1078 // CHECK2-NEXT: [[TMP113:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS26]], i32 0, i32 7 1079 // CHECK2-NEXT: store i8** null, i8*** [[TMP113]], align 4 1080 // CHECK2-NEXT: [[TMP114:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS26]], i32 0, i32 8 1081 // CHECK2-NEXT: store i64 0, i64* [[TMP114]], align 8 1082 // CHECK2-NEXT: [[TMP115:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB4]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS26]]) 1083 // CHECK2-NEXT: [[TMP116:%.*]] = icmp ne i32 [[TMP115]], 0 1084 // CHECK2-NEXT: br i1 [[TMP116]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] 1085 // CHECK2: omp_offload.failed27: 1086 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78(i32 [[TMP93]], [10 x i32]* [[A]]) #[[ATTR2]] 1087 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT28]] 1088 // CHECK2: omp_offload.cont28: 1089 // CHECK2-NEXT: [[TMP117:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1090 // CHECK2-NEXT: store i32 [[TMP117]], i32* [[SIZE_CASTED29]], align 4 1091 // CHECK2-NEXT: [[TMP118:%.*]] = load i32, i32* [[SIZE_CASTED29]], align 4 1092 // CHECK2-NEXT: [[ARRAYIDX30:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 3 1093 // CHECK2-NEXT: [[TMP119:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 1094 // CHECK2-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32* 1095 // CHECK2-NEXT: store i32 [[TMP118]], i32* [[TMP120]], align 4 1096 // CHECK2-NEXT: [[TMP121:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 1097 // CHECK2-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32* 1098 // CHECK2-NEXT: store i32 [[TMP118]], i32* [[TMP122]], align 4 1099 // CHECK2-NEXT: [[TMP123:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 0 1100 // CHECK2-NEXT: store i8* null, i8** [[TMP123]], align 4 1101 // CHECK2-NEXT: [[TMP124:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 1 1102 // CHECK2-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to [10 x i32]** 1103 // CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP125]], align 4 1104 // CHECK2-NEXT: [[TMP126:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 1 1105 // CHECK2-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i32** 1106 // CHECK2-NEXT: store i32* [[ARRAYIDX30]], i32** 
[[TMP127]], align 4 1107 // CHECK2-NEXT: [[TMP128:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 1 1108 // CHECK2-NEXT: store i8* null, i8** [[TMP128]], align 4 1109 // CHECK2-NEXT: [[TMP129:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 1110 // CHECK2-NEXT: [[TMP130:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 1111 // CHECK2-NEXT: [[KERNEL_ARGS34:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 1112 // CHECK2-NEXT: [[TMP131:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS34]], i32 0, i32 0 1113 // CHECK2-NEXT: store i32 1, i32* [[TMP131]], align 4 1114 // CHECK2-NEXT: [[TMP132:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS34]], i32 0, i32 1 1115 // CHECK2-NEXT: store i32 2, i32* [[TMP132]], align 4 1116 // CHECK2-NEXT: [[TMP133:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS34]], i32 0, i32 2 1117 // CHECK2-NEXT: store i8** [[TMP129]], i8*** [[TMP133]], align 4 1118 // CHECK2-NEXT: [[TMP134:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS34]], i32 0, i32 3 1119 // CHECK2-NEXT: store i8** [[TMP130]], i8*** [[TMP134]], align 4 1120 // CHECK2-NEXT: [[TMP135:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS34]], i32 0, i32 4 1121 // CHECK2-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.15, i32 0, i32 0), i64** [[TMP135]], align 4 1122 // CHECK2-NEXT: [[TMP136:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS34]], i32 0, i32 5 1123 // CHECK2-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.16, i32 0, i32 0), i64** [[TMP136]], align 4 1124 // CHECK2-NEXT: [[TMP137:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS34]], i32 0, i32 6 1125 // CHECK2-NEXT: store i8** null, i8*** [[TMP137]], align 4 1126 // CHECK2-NEXT: [[TMP138:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS34]], i32 0, i32 7 1127 // CHECK2-NEXT: store i8** null, i8*** [[TMP138]], align 4 1128 // CHECK2-NEXT: [[TMP139:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS34]], i32 0, i32 8 1129 // CHECK2-NEXT: store i64 0, i64* [[TMP139]], align 8 1130 // CHECK2-NEXT: [[TMP140:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB4]], i64 -1, i32 1, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS34]]) 1131 // CHECK2-NEXT: [[TMP141:%.*]] = icmp ne i32 [[TMP140]], 0 1132 // CHECK2-NEXT: br i1 [[TMP141]], label [[OMP_OFFLOAD_FAILED35:%.*]], label [[OMP_OFFLOAD_CONT36:%.*]] 1133 // CHECK2: omp_offload.failed35: 1134 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81(i32 [[TMP118]], [10 x i32]* [[A]]) #[[ATTR2]] 1135 // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT36]] 1136 // CHECK2: omp_offload.cont36: 1137 // CHECK2-NEXT: ret void 1138 // 1139 // 1140 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69 1141 // CHECK2-SAME: (i32 
noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1:[0-9]+]] { 1142 // CHECK2-NEXT: entry: 1143 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1144 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1145 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1146 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1147 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1148 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1149 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1150 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1151 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4 1152 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1153 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1154 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1155 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]]) 1156 // CHECK2-NEXT: ret void 1157 // 1158 // 1159 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined. 1160 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1161 // CHECK2-NEXT: entry: 1162 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1163 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1164 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1165 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1166 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1167 // CHECK2-NEXT: [[OUTPUT1:%.*]] = alloca i32, align 4 1168 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 4 1169 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1170 // CHECK2-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 1171 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1172 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 1173 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1174 // CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 1175 // CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 1176 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1177 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1178 // CHECK2-NEXT: [[I5:%.*]] = alloca i32, align 4 1179 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1180 // CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 1181 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1182 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1183 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1184 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1185 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1186 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1187 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 0 1188 // CHECK2-NEXT: store i32 0, i32* [[OUTPUT1]], align 4 1189 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1190 // CHECK2-NEXT: 
[[TMP2:%.*]] = ptrtoint i32* [[TMP1]] to i64 1191 // CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 1192 // CHECK2-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]] 1193 // CHECK2-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 1194 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr i32, i32* [[OUTPUT1]], i64 [[TMP5]] 1195 // CHECK2-NEXT: store i32* [[TMP6]], i32** [[TMP]], align 4 1196 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1197 // CHECK2-NEXT: store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_]], align 4 1198 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1199 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP8]], 0 1200 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 1201 // CHECK2-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1 1202 // CHECK2-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4 1203 // CHECK2-NEXT: store i32 0, i32* [[I]], align 4 1204 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1205 // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] 1206 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1207 // CHECK2: omp.precond.then: 1208 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 1209 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1210 // CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_COMB_UB]], align 4 1211 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1212 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1213 // CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1214 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1215 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP12]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1216 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1217 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1218 // CHECK2-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]] 1219 // CHECK2-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1220 // CHECK2: cond.true: 1221 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 1222 // CHECK2-NEXT: br label [[COND_END:%.*]] 1223 // CHECK2: cond.false: 1224 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1225 // CHECK2-NEXT: br label [[COND_END]] 1226 // CHECK2: cond.end: 1227 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ] 1228 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 1229 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1230 // CHECK2-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4 1231 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1232 // CHECK2: omp.inner.for.cond: 1233 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1234 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1235 // CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]] 1236 // CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1237 // CHECK2: omp.inner.for.body: 1238 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 1239 // CHECK2-NEXT: 
[[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 1240 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1241 // CHECK2-NEXT: store i32 [[TMP22]], i32* [[SIZE_CASTED]], align 4 1242 // CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1243 // CHECK2-NEXT: [[TMP24:%.*]] = load i32*, i32** [[TMP]], align 4 1244 // CHECK2-NEXT: [[TMP25:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1245 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP20]], i32 [[TMP21]], i32 [[TMP23]], i32* [[TMP24]], i32* [[TMP25]]) 1246 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1247 // CHECK2: omp.inner.for.inc: 1248 // CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1249 // CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 1250 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP26]], [[TMP27]] 1251 // CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 1252 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 1253 // CHECK2: omp.inner.for.end: 1254 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1255 // CHECK2: omp.loop.exit: 1256 // CHECK2-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1257 // CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4 1258 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP29]]) 1259 // CHECK2-NEXT: br label [[OMP_PRECOND_END]] 1260 // CHECK2: omp.precond.end: 1261 // CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 1262 // CHECK2-NEXT: [[TMP31:%.*]] = bitcast i32* [[OUTPUT1]] to i8* 1263 // CHECK2-NEXT: store i8* [[TMP31]], i8** [[TMP30]], align 4 1264 // CHECK2-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1265 // CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4 1266 // CHECK2-NEXT: [[TMP34:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1267 // CHECK2-NEXT: [[TMP35:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP33]], i32 1, i32 4, i8* [[TMP34]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var) 1268 // CHECK2-NEXT: switch i32 [[TMP35]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1269 // CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1270 // CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1271 // CHECK2-NEXT: ] 1272 // CHECK2: .omp.reduction.case1: 1273 // CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 1274 // CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[OUTPUT1]], align 4 1275 // CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]] 1276 // CHECK2-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4 1277 // CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP33]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1278 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1279 // CHECK2: .omp.reduction.case2: 1280 // CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[OUTPUT1]], align 4 1281 // CHECK2-NEXT: [[TMP39:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP38]] monotonic, align 4 1282 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1283 // CHECK2: .omp.reduction.default: 1284 // CHECK2-NEXT: ret void 1285 // 1286 // 
1287 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1 1288 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1289 // CHECK2-NEXT: entry: 1290 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1291 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1292 // CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4 1293 // CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4 1294 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1295 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1296 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1297 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 1298 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 1299 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 1300 // CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 1301 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 1302 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 1303 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 1304 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 1305 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 1306 // CHECK2-NEXT: [[OUTPUT3:%.*]] = alloca i32, align 4 1307 // CHECK2-NEXT: [[_TMP4:%.*]] = alloca i32*, align 4 1308 // CHECK2-NEXT: [[I5:%.*]] = alloca i32, align 4 1309 // CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 1310 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1311 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1312 // CHECK2-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4 1313 // CHECK2-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4 1314 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1315 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1316 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1317 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1318 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 1319 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1320 // CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0 1321 // CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 1322 // CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 1323 // CHECK2-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 1324 // CHECK2-NEXT: store i32 0, i32* [[I]], align 4 1325 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 1326 // CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]] 1327 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] 1328 // CHECK2: omp.precond.then: 1329 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 1330 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1331 // CHECK2-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4 1332 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4 1333 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4 1334 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_LB]], align 4 1335 // CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], 
align 4 1336 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 1337 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 1338 // CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1339 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 0 1340 // CHECK2-NEXT: store i32 0, i32* [[OUTPUT3]], align 4 1341 // CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1342 // CHECK2-NEXT: [[TMP8:%.*]] = ptrtoint i32* [[TMP7]] to i64 1343 // CHECK2-NEXT: [[TMP9:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 1344 // CHECK2-NEXT: [[TMP10:%.*]] = sub i64 [[TMP8]], [[TMP9]] 1345 // CHECK2-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) 1346 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[OUTPUT3]], i64 [[TMP11]] 1347 // CHECK2-NEXT: store i32* [[TMP12]], i32** [[_TMP4]], align 4 1348 // CHECK2-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1349 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4 1350 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP14]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 1351 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1352 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1353 // CHECK2-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP15]], [[TMP16]] 1354 // CHECK2-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 1355 // CHECK2: cond.true: 1356 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 1357 // CHECK2-NEXT: br label [[COND_END:%.*]] 1358 // CHECK2: cond.false: 1359 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1360 // CHECK2-NEXT: br label [[COND_END]] 1361 // CHECK2: cond.end: 1362 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP17]], [[COND_TRUE]] ], [ [[TMP18]], [[COND_FALSE]] ] 1363 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 1364 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 1365 // CHECK2-NEXT: store i32 [[TMP19]], i32* [[DOTOMP_IV]], align 4 1366 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1367 // CHECK2: omp.inner.for.cond: 1368 // CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1369 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 1370 // CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] 1371 // CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1372 // CHECK2: omp.inner.for.body: 1373 // CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1374 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1 1375 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 1376 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I5]], align 4 1377 // CHECK2-NEXT: [[TMP23:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1378 // CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[I5]], align 4 1379 // CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]] 1380 // CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4 1381 // CHECK2-NEXT: [[TMP26:%.*]] = load i32*, i32** [[_TMP4]], align 4 1382 // CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 0 1383 // CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4 1384 // CHECK2-NEXT: 
[[ADD10:%.*]] = add nsw i32 [[TMP27]], [[TMP25]] 1385 // CHECK2-NEXT: store i32 [[ADD10]], i32* [[ARRAYIDX9]], align 4 1386 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 1387 // CHECK2: omp.body.continue: 1388 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1389 // CHECK2: omp.inner.for.inc: 1390 // CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 1391 // CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP28]], 1 1392 // CHECK2-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4 1393 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 1394 // CHECK2: omp.inner.for.end: 1395 // CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] 1396 // CHECK2: omp.loop.exit: 1397 // CHECK2-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1398 // CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 1399 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]]) 1400 // CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 1401 // CHECK2-NEXT: [[TMP32:%.*]] = bitcast i32* [[OUTPUT3]] to i8* 1402 // CHECK2-NEXT: store i8* [[TMP32]], i8** [[TMP31]], align 4 1403 // CHECK2-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1404 // CHECK2-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4 1405 // CHECK2-NEXT: [[TMP35:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1406 // CHECK2-NEXT: [[TMP36:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP34]], i32 1, i32 4, i8* [[TMP35]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) 1407 // CHECK2-NEXT: switch i32 [[TMP36]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1408 // CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1409 // CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1410 // CHECK2-NEXT: ] 1411 // CHECK2: .omp.reduction.case1: 1412 // CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 1413 // CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[OUTPUT3]], align 4 1414 // CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP37]], [[TMP38]] 1415 // CHECK2-NEXT: store i32 [[ADD12]], i32* [[ARRAYIDX]], align 4 1416 // CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP34]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1417 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1418 // CHECK2: .omp.reduction.case2: 1419 // CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[OUTPUT3]], align 4 1420 // CHECK2-NEXT: [[TMP40:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP39]] monotonic, align 4 1421 // CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1422 // CHECK2: .omp.reduction.default: 1423 // CHECK2-NEXT: br label [[OMP_PRECOND_END]] 1424 // CHECK2: omp.precond.end: 1425 // CHECK2-NEXT: ret void 1426 // 1427 // 1428 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func 1429 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] { 1430 // CHECK2-NEXT: entry: 1431 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 1432 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 1433 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 1434 // CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 1435 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 1436 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1437 // CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], 
align 4 1438 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1439 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 1440 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 1441 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 1442 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 1443 // CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 1444 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 1445 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1446 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4 1447 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 1448 // CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 1449 // CHECK2-NEXT: ret void 1450 // 1451 // 1452 // CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2 1453 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 1454 // CHECK2-NEXT: entry: 1455 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 1456 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 1457 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 1458 // CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 1459 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 1460 // CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1461 // CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 1462 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1463 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 1464 // CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 1465 // CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 1466 // CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 1467 // CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 1468 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 1469 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 1470 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4 1471 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 1472 // CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 1473 // CHECK2-NEXT: ret void 1474 // 1475 // 1476 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73 1477 // CHECK2-SAME: (i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { 1478 // CHECK2-NEXT: entry: 1479 // CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 1480 // CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 1481 // CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 1482 // CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 1483 // CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 1484 // CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 1485 // CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 1486 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 1487 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4 1488 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 1489 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 1490 // CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 1491 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, 
i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[OUTPUT2:%.*]] = alloca [3 x i32], align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP3:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I6:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 0
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 2
// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[OUTPUT2]], i32 0, i32 0
// CHECK2-NEXT: [[TMP2:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP2]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK2: omp.arrayinit.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP2]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK2: omp.arrayinit.done:
// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64
// CHECK2-NEXT: [[TMP5:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64
// CHECK2-NEXT: [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]]
// CHECK2-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast [3 x i32]* [[OUTPUT2]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr i32, i32* [[TMP8]], i64 [[TMP7]]
// CHECK2-NEXT: store i32* [[TMP9]], i32** [[TMP]], align 4
// CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [3 x i32]* [[OUTPUT2]] to i32*
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], 0
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK2-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK2-NEXT: store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_4]], align 4
// CHECK2-NEXT: store i32 0, i32* [[I]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP12]]
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK2: omp.precond.then:
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
// CHECK2-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP15]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
// CHECK2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP16]], [[TMP17]]
// CHECK2-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP18]], [[COND_TRUE]] ], [ [[TMP19]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP20]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]]
// CHECK2-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP25]], i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP]], align 4
// CHECK2-NEXT: [[TMP28:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP23]], i32 [[TMP24]], i32 [[TMP26]], i32* [[TMP27]], i32* [[TMP28]])
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP32]])
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
// CHECK2: omp.precond.end:
// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP34:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK2-NEXT: store i8* [[TMP34]], i8** [[TMP33]], align 4
// CHECK2-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4
// CHECK2-NEXT: [[TMP37:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP38:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], i32 1, i32 4, i8* [[TMP37]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP38]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP39]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE13:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST9:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP40:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], align 4
// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP40]], [[TMP41]]
// CHECK2-NEXT: store i32 [[ADD10]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE12:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP39]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE12]], label [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done13:
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP42:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY14:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP42]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY14]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY15:%.*]]
// CHECK2: omp.arraycpy.body15:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST16:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT19:%.*]], [[OMP_ARRAYCPY_BODY15]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST17:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT18:%.*]], [[OMP_ARRAYCPY_BODY15]] ]
// CHECK2-NEXT: [[TMP43:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST16]], align 4
// CHECK2-NEXT: [[TMP44:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST17]], i32 [[TMP43]] monotonic, align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT18]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST17]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT19]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST16]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT18]], [[TMP42]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY15]]
// CHECK2: omp.arraycpy.done21:
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: ret void
//
//
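// The checks above cover the teams 'distribute' loop (.omp_outlined..3): each
// team zero-initializes a private [3 x i32] copy of output[:3], rebases the
// reduction pointer onto it by element distance, and forks .omp_outlined..4
// for the 'parallel for'. The checks below cover that inner parallel body.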
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[OUTPUT4:%.*]] = alloca [3 x i32], align 4
// CHECK2-NEXT: [[_TMP5:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[I6:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK2-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK2-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: store i32 0, i32* [[I]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]]
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK2: omp.precond.then:
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 2
// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[OUTPUT4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP8]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK2: omp.arrayinit.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[OMP_PRECOND_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK2: omp.arrayinit.done:
// CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = ptrtoint i32* [[TMP9]] to i64
// CHECK2-NEXT: [[TMP11:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64
// CHECK2-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]]
// CHECK2-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast [3 x i32]* [[OUTPUT4]] to i32*
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr i32, i32* [[TMP14]], i64 [[TMP13]]
// CHECK2-NEXT: store i32* [[TMP15]], i32** [[_TMP5]], align 4
// CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [3 x i32]* [[OUTPUT4]] to i32*
// CHECK2-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP17]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP18]], [[TMP19]]
// CHECK2-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP20]], [[COND_TRUE]] ], [ [[TMP21]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP22]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]]
// CHECK2-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP25]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[I6]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[I6]], align 4
// CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]]
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4
// CHECK2-NEXT: [[TMP29:%.*]] = load i32*, i32** [[_TMP5]], align 4
// CHECK2-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 0
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX10]], align 4
// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP30]], [[TMP28]]
// CHECK2-NEXT: store i32 [[ADD11]], i32* [[ARRAYIDX10]], align 4
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP31]], 1
// CHECK2-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]])
// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK2-NEXT: store i8* [[TMP35]], i8** [[TMP34]], align 4
// CHECK2-NEXT: [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4
// CHECK2-NEXT: [[TMP38:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP39:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP37]], i32 1, i32 4, i8* [[TMP38]], void (i8*, i8*)* @.omp.reduction.reduction_func.5, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP39]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP40]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE17:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST13:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT15:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], align 4
// CHECK2-NEXT: [[TMP42:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP41]], [[TMP42]]
// CHECK2-NEXT: store i32 [[ADD14]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT15]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE16:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT15]], [[TMP40]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE16]], label [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done17:
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP37]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP43:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY18:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP43]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY18]], label [[OMP_ARRAYCPY_DONE25:%.*]], label [[OMP_ARRAYCPY_BODY19:%.*]]
// CHECK2: omp.arraycpy.body19:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST20:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT23:%.*]], [[OMP_ARRAYCPY_BODY19]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST21:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT22:%.*]], [[OMP_ARRAYCPY_BODY19]] ]
// CHECK2-NEXT: [[TMP44:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], align 4
// CHECK2-NEXT: [[TMP45:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 [[TMP44]] monotonic, align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT22]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT23]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE24:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT22]], [[TMP43]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE24]], label [[OMP_ARRAYCPY_DONE25]], label [[OMP_ARRAYCPY_BODY19]]
// CHECK2: omp.arraycpy.done25:
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
// CHECK2: omp.precond.end:
// CHECK2-NEXT: ret void
//
//
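// .omp.reduction.reduction_func.5 combines two threads' private [3 x i32]
// copies element-wise; it is the combiner passed to __kmpc_reduce_nowait in
// .omp_outlined..4 above.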
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.5
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done2:
// CHECK2-NEXT: ret void
//
//
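// .omp.reduction.reduction_func.6 is the same element-wise combiner, used by
// the teams-level reduction in .omp_outlined..3.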
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.6
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 3
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done2:
// CHECK2-NEXT: ret void
//
//
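// Checks for the 'target parallel reduction(+: a[:2])' region (line 78): the
// offload entry receives the whole [10 x i32] array by reference, while the
// outlined function privatizes only the two-element section.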
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78
// CHECK2-SAME: (i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR6:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP2]], [10 x i32]* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: [[A2:%.*]] = alloca [2 x i32], align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 0
// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 1
// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[A2]], i32 0, i32 0
// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 2
// CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP1]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK2: omp.arrayinit.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP1]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK2: omp.arrayinit.done:
// CHECK2-NEXT: [[TMP2:%.*]] = bitcast [10 x i32]* [[TMP0]] to i32*
// CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[TMP2]] to i64
// CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64
// CHECK2-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]]
// CHECK2-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast [2 x i32]* [[A2]] to i32*
// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP7]], i64 [[TMP6]]
// CHECK2-NEXT: [[TMP9:%.*]] = bitcast i32* [[TMP8]] to [10 x i32]*
// CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [2 x i32]* [[A2]] to i32*
// CHECK2-NEXT: store i32 0, i32* [[I]], align 4
// CHECK2-NEXT: br label [[FOR_COND:%.*]]
// CHECK2: for.cond:
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP10]], [[TMP11]]
// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK2: for.body:
// CHECK2-NEXT: br label [[FOR_INC:%.*]]
// CHECK2: for.inc:
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[I]], align 4
// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK2-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32 1, i32 4, i8* [[TMP17]], void (i8*, i8*)* @.omp.reduction.reduction_func.10, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 2
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP19]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP19]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done6:
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 2
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP22]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
// CHECK2: omp.arraycpy.body8:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP23]] monotonic, align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP22]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
// CHECK2: omp.arraycpy.done14:
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: ret void
//
//
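// .omp.reduction.reduction_func.10 combines the two-element private copies of
// the a[:2] section.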
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.10
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 2
// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK2: omp.arraycpy.body:
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK2: omp.arraycpy.done2:
// CHECK2-NEXT: ret void
//
//
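// Checks for the 'target parallel reduction(+: a[3])' region (line 81): a
// single element is reduced, so the private copy is a scalar i32 ([[A1]]) and
// the case2 path uses one atomicrmw add instead of a copy loop.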
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81
// CHECK2-SAME: (i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR6]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP1]], i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4
// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], [10 x i32]* [[TMP0]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13
// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4
// CHECK2-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 3
// CHECK2-NEXT: store i32 0, i32* [[A1]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = bitcast [10 x i32]* [[TMP0]] to i32*
// CHECK2-NEXT: [[TMP2:%.*]] = ptrtoint i32* [[TMP1]] to i64
// CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64
// CHECK2-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]]
// CHECK2-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr i32, i32* [[A1]], i64 [[TMP5]]
// CHECK2-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to [10 x i32]*
// CHECK2-NEXT: store i32 0, i32* [[I]], align 4
// CHECK2-NEXT: br label [[FOR_COND:%.*]]
// CHECK2: for.cond:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK2: for.body:
// CHECK2-NEXT: br label [[FOR_INC:%.*]]
// CHECK2: for.inc:
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK2-NEXT: store i32 [[INC]], i32* [[I]], align 4
// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]]
// CHECK2: for.end:
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK2-NEXT: [[TMP12:%.*]] = bitcast i32* [[A1]] to i8*
// CHECK2-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4
// CHECK2-NEXT: [[TMP15:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK2-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 1, i32 4, i8* [[TMP15]], void (i8*, i8*)* @.omp.reduction.reduction_func.14, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: switch i32 [[TMP16]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK2-NEXT: ]
// CHECK2: .omp.reduction.case1:
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[A1]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX]], align 4
// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.case2:
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[A1]], align 4
// CHECK2-NEXT: [[TMP20:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP19]] monotonic, align 4
// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK2: .omp.reduction.default:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.14
// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK2-NEXT: ret void
//
//
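// Host-side checks: main allocates the 100-element array with operator new[]
// (@_Znaj, 400 bytes) and calls _Z3sumPiiS_; the target 'requires' flags are
// registered at startup via @.omp_offloading.requires_reg.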
// CHECK2-LABEL: define {{[^@]+}}@main
// CHECK2-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[SIZE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[ARRAY:%.*]] = alloca i32*, align 4
// CHECK2-NEXT: [[RESULT:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4
// CHECK2-NEXT: store i32 100, i32* [[SIZE]], align 4
// CHECK2-NEXT: [[CALL:%.*]] = call noalias noundef nonnull i8* @_Znaj(i32 noundef 400) #[[ATTR10:[0-9]+]]
// CHECK2-NEXT: [[TMP0:%.*]] = bitcast i8* [[CALL]] to i32*
// CHECK2-NEXT: store i32* [[TMP0]], i32** [[ARRAY]], align 4
// CHECK2-NEXT: store i32 0, i32* [[RESULT]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[ARRAY]], align 4
// CHECK2-NEXT: call void @_Z3sumPiiS_(i32* noundef [[TMP1]], i32 noundef 100, i32* noundef [[RESULT]])
// CHECK2-NEXT: ret i32 0
//
//
// CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK2-SAME: () #[[ATTR9:[0-9]+]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK2-NEXT: ret void
//