1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ 2 // Test host codegen. 3 // RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -verify -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1 4 // RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s 5 // RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1 6 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3 7 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s 8 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3 9 10 // Test target codegen - host bc file has to be created first. 
11 // RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -verify -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc 12 // RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -verify -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK5 13 // RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s 14 // RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK5 15 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc 16 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK7 17 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s 18 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc 
-include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK7 19 20 // expected-no-diagnostics 21 #ifndef HEADER 22 #define HEADER 23 24 25 void mapWithPrivate() { /* x,y appear in both map() and private(); the outlined region gets fresh uninitialized copies (no kernel arguments), while the map entries are still emitted */ 26 int x, y; 27 #pragma omp target teams private(x) map(x,y) private(y) 28 ; 29 } 30 31 void mapWithFirstprivate() { /* map() combined with firstprivate(): the region's copies are initialized by value from the host x,y (kernel takes them as arguments) */ 32 int x, y; 33 #pragma omp target teams firstprivate(x) map(x,y) firstprivate(y) 34 ; 35 } 36 37 void mapWithReduction() { /* map() combined with a '+' reduction over the same variables; codegen emits the __kmpc_reduce protocol */ 38 int x, y; 39 #pragma omp target teams reduction(+:x) map(x,y) reduction(+:y) 40 ; 41 } 42 43 void mapFrom() { /* firstprivate plus an explicit 'from' map-type modifier */ 44 int x; 45 #pragma omp target teams firstprivate(x) map(from:x) 46 ; 47 } 48 49 void mapTo() { /* firstprivate plus an explicit 'to' map-type modifier */ 50 int x; 51 #pragma omp target teams firstprivate(x) map(to:x) 52 ; 53 } 54 55 void mapAlloc() { /* firstprivate plus an 'alloc' map-type (storage only, no transfer) */ 56 int x; 57 #pragma omp target teams firstprivate(x) map(alloc:x) 58 ; 59 } 60 61 void mapArray() { /* the same clause combinations applied to array variables, with default and 'to' map-types */ 62 int x[77], y[88], z[99]; 63 #pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(x,y,z) 64 ; 65 #pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(to:x,y,z) 66 ; 67 } 68 69 # if HAS_INT128 70 void mapInt128() { /* same combinations for __int128; only compiled when HAS_INT128 is defined (the powerpc64le RUN lines), since i386 lacks __int128 */ 71 __int128 x, y, z; 72 #pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(x,y,z) 73 ; 74 #pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(from:x,y,z) 75 ; 76 } 77 # endif 78 #endif 79 // CHECK1-LABEL: define {{[^@]+}}@_Z14mapWithPrivatev 80 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] { 81 // CHECK1-NEXT: entry: 82 // CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4 83 // CHECK1-NEXT: [[Y:%.*]] = alloca i32, align 4 84 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8 85 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8 86 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8 87 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 88 // CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32** 89 // CHECK1-NEXT: store i32* [[X]], i32** [[TMP1]], align 8 
90 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 91 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32** 92 // CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8 93 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 94 // CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8 95 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 96 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32** 97 // CHECK1-NEXT: store i32* [[Y]], i32** [[TMP6]], align 8 98 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 99 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32** 100 // CHECK1-NEXT: store i32* [[Y]], i32** [[TMP8]], align 8 101 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 102 // CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8 103 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 104 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 105 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 106 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 107 // CHECK1-NEXT: store i32 1, i32* [[TMP12]], align 4 108 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 109 // CHECK1-NEXT: store i32 2, i32* [[TMP13]], align 4 110 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 111 // 
CHECK1-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 8 112 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 113 // CHECK1-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 8 114 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 115 // CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP16]], align 8 116 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 117 // CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP17]], align 8 118 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 119 // CHECK1-NEXT: store i8** null, i8*** [[TMP18]], align 8 120 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 121 // CHECK1-NEXT: store i8** null, i8*** [[TMP19]], align 8 122 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 123 // CHECK1-NEXT: store i64 0, i64* [[TMP20]], align 8 124 // CHECK1-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 125 // CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 126 // CHECK1-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 127 // CHECK1: omp_offload.failed: 128 // CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27() #[[ATTR2:[0-9]+]] 129 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 130 // CHECK1: omp_offload.cont: 131 // CHECK1-NEXT: ret void 132 // 133 // 134 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27 135 // CHECK1-SAME: () #[[ATTR1:[0-9]+]] { 136 // CHECK1-NEXT: entry: 137 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 138 // CHECK1-NEXT: ret void 139 // 140 // 141 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 142 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 143 // CHECK1-NEXT: entry: 144 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 145 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 146 // CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4 147 // CHECK1-NEXT: [[Y:%.*]] = alloca i32, align 4 148 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 149 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 150 // CHECK1-NEXT: ret void 151 // 152 // 153 // CHECK1-LABEL: define {{[^@]+}}@_Z19mapWithFirstprivatev 154 // CHECK1-SAME: () #[[ATTR0]] { 155 // CHECK1-NEXT: entry: 156 // CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4 157 // CHECK1-NEXT: [[Y:%.*]] = alloca i32, align 4 158 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8 159 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8 160 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8 161 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 162 // CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32** 163 // CHECK1-NEXT: store 
i32* [[X]], i32** [[TMP1]], align 8 164 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 165 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32** 166 // CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8 167 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 168 // CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8 169 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 170 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32** 171 // CHECK1-NEXT: store i32* [[Y]], i32** [[TMP6]], align 8 172 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 173 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32** 174 // CHECK1-NEXT: store i32* [[Y]], i32** [[TMP8]], align 8 175 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 176 // CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8 177 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 178 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 179 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 180 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 181 // CHECK1-NEXT: store i32 1, i32* [[TMP12]], align 4 182 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 183 // CHECK1-NEXT: store i32 2, i32* [[TMP13]], align 4 184 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* 
[[KERNEL_ARGS]], i32 0, i32 2 185 // CHECK1-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 8 186 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 187 // CHECK1-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 8 188 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 189 // CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP16]], align 8 190 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 191 // CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP17]], align 8 192 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 193 // CHECK1-NEXT: store i8** null, i8*** [[TMP18]], align 8 194 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 195 // CHECK1-NEXT: store i8** null, i8*** [[TMP19]], align 8 196 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 197 // CHECK1-NEXT: store i64 0, i64* [[TMP20]], align 8 198 // CHECK1-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 199 // CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 200 // CHECK1-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 201 // CHECK1: 
omp_offload.failed: 202 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33(i32* [[X]], i32* [[Y]]) #[[ATTR2]] 203 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 204 // CHECK1: omp_offload.cont: 205 // CHECK1-NEXT: ret void 206 // 207 // 208 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33 209 // CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] { 210 // CHECK1-NEXT: entry: 211 // CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 212 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8 213 // CHECK1-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8 214 // CHECK1-NEXT: [[Y_CASTED:%.*]] = alloca i64, align 8 215 // CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 216 // CHECK1-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8 217 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 218 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8 219 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4 220 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32* 221 // CHECK1-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4 222 // CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[X_CASTED]], align 8 223 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4 224 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[Y_CASTED]] to i32* 225 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV1]], align 4 226 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[Y_CASTED]], align 8 227 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP5]]) 228 // CHECK1-NEXT: ret void 229 // 230 // 231 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1 232 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]], i64 noundef [[Y:%.*]]) #[[ATTR1]] { 233 // CHECK1-NEXT: entry: 234 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 235 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 236 // CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8 237 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i64, align 8 238 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 239 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 240 // CHECK1-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8 241 // CHECK1-NEXT: store i64 [[Y]], i64* [[Y_ADDR]], align 8 242 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32* 243 // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[Y_ADDR]] to i32* 244 // CHECK1-NEXT: ret void 245 // 246 // 247 // CHECK1-LABEL: define {{[^@]+}}@_Z16mapWithReductionv 248 // CHECK1-SAME: () #[[ATTR0]] { 249 // CHECK1-NEXT: entry: 250 // CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4 251 // CHECK1-NEXT: [[Y:%.*]] = alloca i32, align 4 252 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8 253 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8 254 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8 255 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 256 // CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32** 257 // CHECK1-NEXT: store i32* [[X]], i32** [[TMP1]], align 8 258 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 259 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32** 260 // CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8 261 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 262 // CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8 263 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 264 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32** 265 // CHECK1-NEXT: store i32* [[Y]], i32** [[TMP6]], align 8 266 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 267 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32** 268 // CHECK1-NEXT: store i32* [[Y]], i32** [[TMP8]], align 8 269 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 270 // CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8 271 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 272 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 273 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 274 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 275 // CHECK1-NEXT: store i32 1, i32* [[TMP12]], align 4 276 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 277 // CHECK1-NEXT: store i32 2, i32* [[TMP13]], align 4 278 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 279 // CHECK1-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 8 280 // 
CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 281 // CHECK1-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 8 282 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 283 // CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP16]], align 8 284 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 285 // CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP17]], align 8 286 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 287 // CHECK1-NEXT: store i8** null, i8*** [[TMP18]], align 8 288 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 289 // CHECK1-NEXT: store i8** null, i8*** [[TMP19]], align 8 290 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 291 // CHECK1-NEXT: store i64 0, i64* [[TMP20]], align 8 292 // CHECK1-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 293 // CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 294 // CHECK1-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 295 // CHECK1: omp_offload.failed: 296 // CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39(i32* [[X]], i32* [[Y]]) #[[ATTR2]] 297 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 298 // CHECK1: omp_offload.cont: 299 // CHECK1-NEXT: ret void 300 // 301 // 302 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39 303 // CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] { 304 // CHECK1-NEXT: entry: 305 // CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 306 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8 307 // CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 308 // CHECK1-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8 309 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 310 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8 311 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]]) 312 // CHECK1-NEXT: ret void 313 // 314 // 315 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4 316 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] { 317 // CHECK1-NEXT: entry: 318 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 319 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 320 // CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 321 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8 322 // CHECK1-NEXT: [[X1:%.*]] = alloca i32, align 4 323 // CHECK1-NEXT: [[Y2:%.*]] = alloca i32, align 4 324 // CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8 325 // 
CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 326 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 327 // CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 328 // CHECK1-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8 329 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 330 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8 331 // CHECK1-NEXT: store i32 0, i32* [[X1]], align 4 332 // CHECK1-NEXT: store i32 0, i32* [[Y2]], align 4 333 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 334 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i32* [[X1]] to i8* 335 // CHECK1-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 8 336 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 337 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8* 338 // CHECK1-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8 339 // CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 340 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4 341 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 342 // CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i64 16, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) 343 // CHECK1-NEXT: switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 344 // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 345 // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 346 // CHECK1-NEXT: ] 347 // CHECK1: .omp.reduction.case1: 348 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4 349 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[X1]], align 4 350 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] 
351 // CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4 352 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4 353 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4 354 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 355 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[TMP1]], align 4 356 // CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var) 357 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 358 // CHECK1: .omp.reduction.case2: 359 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[X1]], align 4 360 // CHECK1-NEXT: [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4 361 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4 362 // CHECK1-NEXT: [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4 363 // CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var) 364 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 365 // CHECK1: .omp.reduction.default: 366 // CHECK1-NEXT: ret void 367 // 368 // 369 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func 370 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] { 371 // CHECK1-NEXT: entry: 372 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 373 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 374 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 375 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 376 // CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 377 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]* 378 // CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 379 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]* 380 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0 
381 // CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 382 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 383 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0 384 // CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 385 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 386 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1 387 // CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8 388 // CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32* 389 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1 390 // CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8 391 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32* 392 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4 393 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4 394 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] 395 // CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 396 // CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4 397 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4 398 // CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] 399 // CHECK1-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4 400 // CHECK1-NEXT: ret void 401 // 402 // 403 // CHECK1-LABEL: define {{[^@]+}}@_Z7mapFromv 404 // CHECK1-SAME: () #[[ATTR0]] { 405 // CHECK1-NEXT: entry: 406 // CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4 407 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 408 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 409 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 410 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 411 // CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to 
i32** 412 // CHECK1-NEXT: store i32* [[X]], i32** [[TMP1]], align 8 413 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 414 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32** 415 // CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8 416 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 417 // CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8 418 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 419 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 420 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 421 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 422 // CHECK1-NEXT: store i32 1, i32* [[TMP7]], align 4 423 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 424 // CHECK1-NEXT: store i32 1, i32* [[TMP8]], align 4 425 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 426 // CHECK1-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 8 427 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 428 // CHECK1-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 8 429 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 430 // CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64** [[TMP11]], align 8 431 // CHECK1-NEXT: 
[[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 432 // CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP12]], align 8 433 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 434 // CHECK1-NEXT: store i8** null, i8*** [[TMP13]], align 8 435 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 436 // CHECK1-NEXT: store i8** null, i8*** [[TMP14]], align 8 437 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 438 // CHECK1-NEXT: store i64 0, i64* [[TMP15]], align 8 439 // CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 440 // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 441 // CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 442 // CHECK1: omp_offload.failed: 443 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45(i32* [[X]]) #[[ATTR2]] 444 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 445 // CHECK1: omp_offload.cont: 446 // CHECK1-NEXT: ret void 447 // 448 // 449 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45 450 // CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] { 451 // CHECK1-NEXT: entry: 452 // CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 453 // CHECK1-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8 454 // CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 455 
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 456 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 457 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32* 458 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4 459 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8 460 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP2]]) 461 // CHECK1-NEXT: ret void 462 // 463 // 464 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7 465 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR1]] { 466 // CHECK1-NEXT: entry: 467 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 468 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 469 // CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8 470 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 471 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 472 // CHECK1-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8 473 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32* 474 // CHECK1-NEXT: ret void 475 // 476 // 477 // CHECK1-LABEL: define {{[^@]+}}@_Z5mapTov 478 // CHECK1-SAME: () #[[ATTR0]] { 479 // CHECK1-NEXT: entry: 480 // CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4 481 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 482 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 483 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 484 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 485 // CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32** 486 // 
CHECK1-NEXT: store i32* [[X]], i32** [[TMP1]], align 8 487 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 488 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32** 489 // CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8 490 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 491 // CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8 492 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 493 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 494 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 495 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 496 // CHECK1-NEXT: store i32 1, i32* [[TMP7]], align 4 497 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 498 // CHECK1-NEXT: store i32 1, i32* [[TMP8]], align 4 499 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 500 // CHECK1-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 8 501 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 502 // CHECK1-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 8 503 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 504 // CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64** [[TMP11]], align 8 505 // CHECK1-NEXT: [[TMP12:%.*]] = 
getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 506 // CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP12]], align 8 507 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 508 // CHECK1-NEXT: store i8** null, i8*** [[TMP13]], align 8 509 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 510 // CHECK1-NEXT: store i8** null, i8*** [[TMP14]], align 8 511 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 512 // CHECK1-NEXT: store i64 0, i64* [[TMP15]], align 8 513 // CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 514 // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 515 // CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 516 // CHECK1: omp_offload.failed: 517 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51(i32* [[X]]) #[[ATTR2]] 518 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 519 // CHECK1: omp_offload.cont: 520 // CHECK1-NEXT: ret void 521 // 522 // 523 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51 524 // CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] { 525 // CHECK1-NEXT: entry: 526 // CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 527 // CHECK1-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8 528 // CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 529 // CHECK1-NEXT: 
[[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 530 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 531 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32* 532 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4 533 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8 534 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP2]]) 535 // CHECK1-NEXT: ret void 536 // 537 // 538 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10 539 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR1]] { 540 // CHECK1-NEXT: entry: 541 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 542 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 543 // CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8 544 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 545 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 546 // CHECK1-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8 547 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32* 548 // CHECK1-NEXT: ret void 549 // 550 // 551 // CHECK1-LABEL: define {{[^@]+}}@_Z8mapAllocv 552 // CHECK1-SAME: () #[[ATTR0]] { 553 // CHECK1-NEXT: entry: 554 // CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4 555 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 556 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 557 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 558 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 559 // CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32** 560 // CHECK1-NEXT: 
store i32* [[X]], i32** [[TMP1]], align 8 561 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 562 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32** 563 // CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8 564 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 565 // CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8 566 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 567 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 568 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 569 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 570 // CHECK1-NEXT: store i32 1, i32* [[TMP7]], align 4 571 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 572 // CHECK1-NEXT: store i32 1, i32* [[TMP8]], align 4 573 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 574 // CHECK1-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 8 575 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 576 // CHECK1-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 8 577 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 578 // CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64** [[TMP11]], align 8 579 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr 
inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 580 // CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i64** [[TMP12]], align 8 581 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 582 // CHECK1-NEXT: store i8** null, i8*** [[TMP13]], align 8 583 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 584 // CHECK1-NEXT: store i8** null, i8*** [[TMP14]], align 8 585 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 586 // CHECK1-NEXT: store i64 0, i64* [[TMP15]], align 8 587 // CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 588 // CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 589 // CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 590 // CHECK1: omp_offload.failed: 591 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57(i32* [[X]]) #[[ATTR2]] 592 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 593 // CHECK1: omp_offload.cont: 594 // CHECK1-NEXT: ret void 595 // 596 // 597 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57 598 // CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] { 599 // CHECK1-NEXT: entry: 600 // CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 601 // CHECK1-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8 602 // CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 603 // CHECK1-NEXT: 
[[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 604 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 605 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32* 606 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4 607 // CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8 608 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]]) 609 // CHECK1-NEXT: ret void 610 // 611 // 612 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13 613 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR1]] { 614 // CHECK1-NEXT: entry: 615 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 616 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 617 // CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8 618 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 619 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 620 // CHECK1-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8 621 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32* 622 // CHECK1-NEXT: ret void 623 // 624 // 625 // CHECK1-LABEL: define {{[^@]+}}@_Z8mapArrayv 626 // CHECK1-SAME: () #[[ATTR0]] { 627 // CHECK1-NEXT: entry: 628 // CHECK1-NEXT: [[X:%.*]] = alloca [77 x i32], align 4 629 // CHECK1-NEXT: [[Y:%.*]] = alloca [88 x i32], align 4 630 // CHECK1-NEXT: [[Z:%.*]] = alloca [99 x i32], align 4 631 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 632 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 633 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 634 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 8 635 // 
CHECK1-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 8 636 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 8 637 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 638 // CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [88 x i32]** 639 // CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP1]], align 8 640 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 641 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [88 x i32]** 642 // CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP3]], align 8 643 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 644 // CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8 645 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 646 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [99 x i32]** 647 // CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP6]], align 8 648 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 649 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [99 x i32]** 650 // CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP8]], align 8 651 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 652 // CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8 653 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 654 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to [77 x i32]** 655 // CHECK1-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP11]], align 8 656 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 657 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] 
to [77 x i32]** 658 // CHECK1-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP13]], align 8 659 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 660 // CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8 661 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 662 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 663 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 664 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 665 // CHECK1-NEXT: store i32 1, i32* [[TMP17]], align 4 666 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 667 // CHECK1-NEXT: store i32 3, i32* [[TMP18]], align 4 668 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 669 // CHECK1-NEXT: store i8** [[TMP15]], i8*** [[TMP19]], align 8 670 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 671 // CHECK1-NEXT: store i8** [[TMP16]], i8*** [[TMP20]], align 8 672 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 673 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.18, i32 0, i32 0), i64** [[TMP21]], align 8 674 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 675 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* 
@.offload_maptypes.19, i32 0, i32 0), i64** [[TMP22]], align 8 676 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 677 // CHECK1-NEXT: store i8** null, i8*** [[TMP23]], align 8 678 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 679 // CHECK1-NEXT: store i8** null, i8*** [[TMP24]], align 8 680 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 681 // CHECK1-NEXT: store i64 0, i64* [[TMP25]], align 8 682 // CHECK1-NEXT: [[TMP26:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 683 // CHECK1-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0 684 // CHECK1-NEXT: br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 685 // CHECK1: omp_offload.failed: 686 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]] 687 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 688 // CHECK1: omp_offload.cont: 689 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 690 // CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to [88 x i32]** 691 // CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP29]], align 8 692 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 693 // CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to [88 x i32]** 694 // CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP31]], align 8 695 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0 696 // CHECK1-NEXT: store i8* null, i8** [[TMP32]], align 8 697 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1 698 // CHECK1-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to [99 x i32]** 699 // CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP34]], align 8 700 // CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1 701 // CHECK1-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [99 x i32]** 702 // CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP36]], align 8 703 // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 1 704 // CHECK1-NEXT: store i8* null, i8** [[TMP37]], align 8 705 // CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2 706 // CHECK1-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [77 x i32]** 707 // CHECK1-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP39]], align 8 708 // CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2 709 // CHECK1-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to [77 x i32]** 710 // CHECK1-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP41]], align 8 711 // CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 2 712 // CHECK1-NEXT: store i8* null, i8** [[TMP42]], align 8 713 // CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 714 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 715 // CHECK1-NEXT: [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 716 // CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* 
[[KERNEL_ARGS4]], i32 0, i32 0 717 // CHECK1-NEXT: store i32 1, i32* [[TMP45]], align 4 718 // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1 719 // CHECK1-NEXT: store i32 3, i32* [[TMP46]], align 4 720 // CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2 721 // CHECK1-NEXT: store i8** [[TMP43]], i8*** [[TMP47]], align 8 722 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3 723 // CHECK1-NEXT: store i8** [[TMP44]], i8*** [[TMP48]], align 8 724 // CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4 725 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.22, i32 0, i32 0), i64** [[TMP49]], align 8 726 // CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5 727 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.23, i32 0, i32 0), i64** [[TMP50]], align 8 728 // CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6 729 // CHECK1-NEXT: store i8** null, i8*** [[TMP51]], align 8 730 // CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7 731 // CHECK1-NEXT: store i8** null, i8*** [[TMP52]], align 8 732 // CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8 733 // CHECK1-NEXT: store i64 0, i64* [[TMP53]], align 8 734 // 
CHECK1-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]]) 735 // CHECK1-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 736 // CHECK1-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 737 // CHECK1: omp_offload.failed5: 738 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]] 739 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT6]] 740 // CHECK1: omp_offload.cont6: 741 // CHECK1-NEXT: ret void 742 // 743 // 744 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63 745 // CHECK1-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] { 746 // CHECK1-NEXT: entry: 747 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8 748 // CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8 749 // CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8 750 // CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8 751 // CHECK1-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8 752 // CHECK1-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8 753 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]]) 754 // CHECK1-NEXT: ret void 755 // 756 // 757 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16 758 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] { 759 // CHECK1-NEXT: entry: 760 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 761 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 762 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8 763 // CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8 764 // CHECK1-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4 765 // CHECK1-NEXT: [[X:%.*]] = alloca [77 x i32], align 4 766 // CHECK1-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4 767 // CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 768 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 769 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 770 // CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8 771 // CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8 772 // CHECK1-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8 773 // CHECK1-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8 774 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8* 775 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8* 776 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false) 777 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* 
[[Z2]], i32 0, i32 0 778 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99 779 // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]] 780 // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 781 // CHECK1: omp.arrayinit.body: 782 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 783 // CHECK1-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 784 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 785 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]] 786 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 787 // CHECK1: omp.arrayinit.done: 788 // CHECK1-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32* 789 // CHECK1-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32* 790 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 791 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 792 // CHECK1-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8 793 // CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 794 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 795 // CHECK1-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 796 // CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.17, [8 x i32]* @.gomp_critical_user_.reduction.var) 797 // CHECK1-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 798 // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 799 // 
CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 800 // CHECK1-NEXT: ] 801 // CHECK1: .omp.reduction.case1: 802 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99 803 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]] 804 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 805 // CHECK1: omp.arraycpy.body: 806 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 807 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ] 808 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 809 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 810 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 811 // CHECK1-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 812 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1 813 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 814 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]] 815 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] 816 // CHECK1: omp.arraycpy.done6: 817 // CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 818 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 819 // CHECK1: .omp.reduction.case2: 820 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99 821 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* 
[[LHS_BEGIN]], [[TMP14]] 822 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]] 823 // CHECK1: omp.arraycpy.body8: 824 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 825 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 826 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4 827 // CHECK1-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4 828 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1 829 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1 830 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]] 831 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]] 832 // CHECK1: omp.arraycpy.done14: 833 // CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 834 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 835 // CHECK1: .omp.reduction.default: 836 // CHECK1-NEXT: ret void 837 // 838 // 839 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.17 840 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 841 // CHECK1-NEXT: entry: 842 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 843 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 844 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 845 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 846 // CHECK1-NEXT: [[TMP2:%.*]] = 
load i8*, i8** [[DOTADDR]], align 8 847 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 848 // CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 849 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 850 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 851 // CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 852 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 853 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 854 // CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 855 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 856 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99 857 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] 858 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 859 // CHECK1: omp.arraycpy.body: 860 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 861 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 862 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 863 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 864 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 865 // CHECK1-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 866 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 867 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 868 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* 
[[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] 869 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 870 // CHECK1: omp.arraycpy.done2: 871 // CHECK1-NEXT: ret void 872 // 873 // 874 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65 875 // CHECK1-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] { 876 // CHECK1-NEXT: entry: 877 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8 878 // CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8 879 // CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8 880 // CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8 881 // CHECK1-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8 882 // CHECK1-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8 883 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..20 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]]) 884 // CHECK1-NEXT: ret void 885 // 886 // 887 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..20 888 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] { 889 // CHECK1-NEXT: entry: 890 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 891 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 892 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8 893 // CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8 894 // CHECK1-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4 895 // CHECK1-NEXT: [[X:%.*]] = alloca [77 x i32], align 4 896 // CHECK1-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4 897 // CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 898 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 899 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 900 // CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8 901 // CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8 902 // CHECK1-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8 903 // CHECK1-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8 904 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8* 905 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8* 906 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false) 907 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* 
[[Z2]], i32 0, i32 0 908 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99 909 // CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]] 910 // CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 911 // CHECK1: omp.arrayinit.body: 912 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 913 // CHECK1-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 914 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 915 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]] 916 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 917 // CHECK1: omp.arrayinit.done: 918 // CHECK1-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32* 919 // CHECK1-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32* 920 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 921 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 922 // CHECK1-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8 923 // CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 924 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 925 // CHECK1-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 926 // CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.21, [8 x i32]* @.gomp_critical_user_.reduction.var) 927 // CHECK1-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 928 // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 929 // 
CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 930 // CHECK1-NEXT: ] 931 // CHECK1: .omp.reduction.case1: 932 // CHECK1-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99 933 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]] 934 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 935 // CHECK1: omp.arraycpy.body: 936 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 937 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ] 938 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 939 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 940 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 941 // CHECK1-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 942 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1 943 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 944 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]] 945 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] 946 // CHECK1: omp.arraycpy.done6: 947 // CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 948 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 949 // CHECK1: .omp.reduction.case2: 950 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99 951 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* 
[[LHS_BEGIN]], [[TMP14]] 952 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]] 953 // CHECK1: omp.arraycpy.body8: 954 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 955 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 956 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4 957 // CHECK1-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4 958 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1 959 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1 960 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]] 961 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]] 962 // CHECK1: omp.arraycpy.done14: 963 // CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 964 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 965 // CHECK1: .omp.reduction.default: 966 // CHECK1-NEXT: ret void 967 // 968 // 969 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.21 970 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 971 // CHECK1-NEXT: entry: 972 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 973 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 974 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 975 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 976 // CHECK1-NEXT: [[TMP2:%.*]] = 
load i8*, i8** [[DOTADDR]], align 8 977 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 978 // CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 979 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 980 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 981 // CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 982 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 983 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 984 // CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 985 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 986 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99 987 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] 988 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 989 // CHECK1: omp.arraycpy.body: 990 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 991 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 992 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 993 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 994 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 995 // CHECK1-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 996 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 997 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 998 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* 
[[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] 999 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 1000 // CHECK1: omp.arraycpy.done2: 1001 // CHECK1-NEXT: ret void 1002 // 1003 // 1004 // CHECK1-LABEL: define {{[^@]+}}@_Z9mapInt128v 1005 // CHECK1-SAME: () #[[ATTR0]] { 1006 // CHECK1-NEXT: entry: 1007 // CHECK1-NEXT: [[X:%.*]] = alloca i128, align 16 1008 // CHECK1-NEXT: [[Y:%.*]] = alloca i128, align 16 1009 // CHECK1-NEXT: [[Z:%.*]] = alloca i128, align 16 1010 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 1011 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 1012 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 1013 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 8 1014 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 8 1015 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 8 1016 // CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1017 // CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i128** 1018 // CHECK1-NEXT: store i128* [[Y]], i128** [[TMP1]], align 8 1019 // CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1020 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i128** 1021 // CHECK1-NEXT: store i128* [[Y]], i128** [[TMP3]], align 8 1022 // CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 1023 // CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8 1024 // CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1025 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i128** 1026 // CHECK1-NEXT: store i128* [[Z]], i128** [[TMP6]], align 8 1027 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], 
i32 0, i32 1 1028 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i128** 1029 // CHECK1-NEXT: store i128* [[Z]], i128** [[TMP8]], align 8 1030 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 1031 // CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8 1032 // CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 1033 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i128** 1034 // CHECK1-NEXT: store i128* [[X]], i128** [[TMP11]], align 8 1035 // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 1036 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i128** 1037 // CHECK1-NEXT: store i128* [[X]], i128** [[TMP13]], align 8 1038 // CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 1039 // CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8 1040 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1041 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1042 // CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 1043 // CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 1044 // CHECK1-NEXT: store i32 1, i32* [[TMP17]], align 4 1045 // CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 1046 // CHECK1-NEXT: store i32 3, i32* [[TMP18]], align 4 1047 // CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 1048 // CHECK1-NEXT: store i8** [[TMP15]], i8*** [[TMP19]], align 8 
1049 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 1050 // CHECK1-NEXT: store i8** [[TMP16]], i8*** [[TMP20]], align 8 1051 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 1052 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.26, i32 0, i32 0), i64** [[TMP21]], align 8 1053 // CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 1054 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.27, i32 0, i32 0), i64** [[TMP22]], align 8 1055 // CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 1056 // CHECK1-NEXT: store i8** null, i8*** [[TMP23]], align 8 1057 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 1058 // CHECK1-NEXT: store i8** null, i8*** [[TMP24]], align 8 1059 // CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 1060 // CHECK1-NEXT: store i64 0, i64* [[TMP25]], align 8 1061 // CHECK1-NEXT: [[TMP26:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 1062 // CHECK1-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0 1063 // CHECK1-NEXT: br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1064 // CHECK1: omp_offload.failed: 1065 // CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72(i128* [[Y]], i128* [[Z]]) #[[ATTR2]] 1066 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] 1067 // CHECK1: omp_offload.cont: 1068 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 1069 // CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i128** 1070 // CHECK1-NEXT: store i128* [[Y]], i128** [[TMP29]], align 8 1071 // CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 1072 // CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i128** 1073 // CHECK1-NEXT: store i128* [[Y]], i128** [[TMP31]], align 8 1074 // CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0 1075 // CHECK1-NEXT: store i8* null, i8** [[TMP32]], align 8 1076 // CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1 1077 // CHECK1-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i128** 1078 // CHECK1-NEXT: store i128* [[Z]], i128** [[TMP34]], align 8 1079 // CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1 1080 // CHECK1-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i128** 1081 // CHECK1-NEXT: store i128* [[Z]], i128** [[TMP36]], align 8 1082 // CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 1 1083 // CHECK1-NEXT: store i8* null, i8** [[TMP37]], align 8 1084 // CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2 1085 // CHECK1-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i128** 1086 // CHECK1-NEXT: store i128* [[X]], i128** [[TMP39]], align 8 1087 // CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2 1088 // CHECK1-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] 
to i128** 1089 // CHECK1-NEXT: store i128* [[X]], i128** [[TMP41]], align 8 1090 // CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 2 1091 // CHECK1-NEXT: store i8* null, i8** [[TMP42]], align 8 1092 // CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 1093 // CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 1094 // CHECK1-NEXT: [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 1095 // CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0 1096 // CHECK1-NEXT: store i32 1, i32* [[TMP45]], align 4 1097 // CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1 1098 // CHECK1-NEXT: store i32 3, i32* [[TMP46]], align 4 1099 // CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2 1100 // CHECK1-NEXT: store i8** [[TMP43]], i8*** [[TMP47]], align 8 1101 // CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3 1102 // CHECK1-NEXT: store i8** [[TMP44]], i8*** [[TMP48]], align 8 1103 // CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4 1104 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64** [[TMP49]], align 8 1105 // CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5 1106 // CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x 
i64]* @.offload_maptypes.31, i32 0, i32 0), i64** [[TMP50]], align 8 1107 // CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6 1108 // CHECK1-NEXT: store i8** null, i8*** [[TMP51]], align 8 1109 // CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7 1110 // CHECK1-NEXT: store i8** null, i8*** [[TMP52]], align 8 1111 // CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8 1112 // CHECK1-NEXT: store i64 0, i64* [[TMP53]], align 8 1113 // CHECK1-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]]) 1114 // CHECK1-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 1115 // CHECK1-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 1116 // CHECK1: omp_offload.failed5: 1117 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74(i128* [[Y]], i128* [[Z]]) #[[ATTR2]] 1118 // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT6]] 1119 // CHECK1: omp_offload.cont6: 1120 // CHECK1-NEXT: ret void 1121 // 1122 // 1123 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72 1124 // CHECK1-SAME: (i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] { 1125 // CHECK1-NEXT: entry: 1126 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8 1127 // CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8 1128 // CHECK1-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8 1129 // CHECK1-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8 1130 // CHECK1-NEXT: 
[[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8 1131 // CHECK1-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8 1132 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]]) 1133 // CHECK1-NEXT: ret void 1134 // 1135 // 1136 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..24 1137 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] { 1138 // CHECK1-NEXT: entry: 1139 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1140 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1141 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8 1142 // CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8 1143 // CHECK1-NEXT: [[Y1:%.*]] = alloca i128, align 16 1144 // CHECK1-NEXT: [[X:%.*]] = alloca i128, align 16 1145 // CHECK1-NEXT: [[Z2:%.*]] = alloca i128, align 16 1146 // CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 1147 // CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16 1148 // CHECK1-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16 1149 // CHECK1-NEXT: [[TMP:%.*]] = alloca i128, align 16 1150 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1151 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1152 // CHECK1-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8 1153 // CHECK1-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8 1154 // CHECK1-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8 1155 // CHECK1-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8 1156 // CHECK1-NEXT: [[TMP2:%.*]] = load 
i128, i128* [[TMP0]], align 16 1157 // CHECK1-NEXT: store i128 [[TMP2]], i128* [[Y1]], align 16 1158 // CHECK1-NEXT: store i128 0, i128* [[Z2]], align 16 1159 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 1160 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8* 1161 // CHECK1-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8 1162 // CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1163 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 1164 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1165 // CHECK1-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.25, [8 x i32]* @.gomp_critical_user_.reduction.var) 1166 // CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1167 // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1168 // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1169 // CHECK1-NEXT: ] 1170 // CHECK1: .omp.reduction.case1: 1171 // CHECK1-NEXT: [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16 1172 // CHECK1-NEXT: [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16 1173 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]] 1174 // CHECK1-NEXT: store i128 [[ADD]], i128* [[TMP1]], align 16 1175 // CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1176 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1177 // CHECK1: .omp.reduction.case2: 1178 // CHECK1-NEXT: [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16 1179 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8* 1180 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8* 1181 // CHECK1-NEXT: call void @__atomic_load(i64 noundef 16, i8* noundef [[TMP12]], i8* noundef [[TMP13]], i32 
noundef signext 0) 1182 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] 1183 // CHECK1: atomic_cont: 1184 // CHECK1-NEXT: [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16 1185 // CHECK1-NEXT: store i128 [[TMP14]], i128* [[TMP]], align 16 1186 // CHECK1-NEXT: [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16 1187 // CHECK1-NEXT: [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16 1188 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]] 1189 // CHECK1-NEXT: store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16 1190 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8* 1191 // CHECK1-NEXT: [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8* 1192 // CHECK1-NEXT: [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8* 1193 // CHECK1-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i64 noundef 16, i8* noundef [[TMP17]], i8* noundef [[TMP18]], i8* noundef [[TMP19]], i32 noundef signext 0, i32 noundef signext 0) 1194 // CHECK1-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]] 1195 // CHECK1: atomic_exit: 1196 // CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1197 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1198 // CHECK1: .omp.reduction.default: 1199 // CHECK1-NEXT: ret void 1200 // 1201 // 1202 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.25 1203 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 1204 // CHECK1-NEXT: entry: 1205 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1206 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 1207 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1208 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 1209 // CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 1210 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1211 // CHECK1-NEXT: [[TMP4:%.*]] = load i8*, 
i8** [[DOTADDR1]], align 8 1212 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1213 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 1214 // CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 1215 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128* 1216 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 1217 // CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 1218 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128* 1219 // CHECK1-NEXT: [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16 1220 // CHECK1-NEXT: [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16 1221 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]] 1222 // CHECK1-NEXT: store i128 [[ADD]], i128* [[TMP11]], align 16 1223 // CHECK1-NEXT: ret void 1224 // 1225 // 1226 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74 1227 // CHECK1-SAME: (i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] { 1228 // CHECK1-NEXT: entry: 1229 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8 1230 // CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8 1231 // CHECK1-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8 1232 // CHECK1-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8 1233 // CHECK1-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8 1234 // CHECK1-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8 1235 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..28 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]]) 1236 // CHECK1-NEXT: ret void 1237 // 1238 // 1239 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..28 1240 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] { 1241 // CHECK1-NEXT: entry: 1242 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1243 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1244 // CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8 1245 // CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8 1246 // CHECK1-NEXT: [[Y1:%.*]] = alloca i128, align 16 1247 // CHECK1-NEXT: [[X:%.*]] = alloca i128, align 16 1248 // CHECK1-NEXT: [[Z2:%.*]] = alloca i128, align 16 1249 // CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 1250 // CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16 1251 // CHECK1-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16 1252 // CHECK1-NEXT: [[TMP:%.*]] = alloca i128, align 16 1253 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1254 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1255 // CHECK1-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8 1256 // CHECK1-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8 1257 // CHECK1-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8 1258 // CHECK1-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8 1259 // CHECK1-NEXT: [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16 1260 // CHECK1-NEXT: store i128 [[TMP2]], i128* [[Y1]], align 16 1261 // CHECK1-NEXT: store i128 0, i128* [[Z2]], align 16 1262 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds 
[1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 1263 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8* 1264 // CHECK1-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8 1265 // CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1266 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 1267 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1268 // CHECK1-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.29, [8 x i32]* @.gomp_critical_user_.reduction.var) 1269 // CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1270 // CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1271 // CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1272 // CHECK1-NEXT: ] 1273 // CHECK1: .omp.reduction.case1: 1274 // CHECK1-NEXT: [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16 1275 // CHECK1-NEXT: [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16 1276 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]] 1277 // CHECK1-NEXT: store i128 [[ADD]], i128* [[TMP1]], align 16 1278 // CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1279 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1280 // CHECK1: .omp.reduction.case2: 1281 // CHECK1-NEXT: [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16 1282 // CHECK1-NEXT: [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8* 1283 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8* 1284 // CHECK1-NEXT: call void @__atomic_load(i64 noundef 16, i8* noundef [[TMP12]], i8* noundef [[TMP13]], i32 noundef signext 0) 1285 // CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] 1286 // CHECK1: atomic_cont: 1287 // CHECK1-NEXT: [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16 1288 // CHECK1-NEXT: store i128 
[[TMP14]], i128* [[TMP]], align 16 1289 // CHECK1-NEXT: [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16 1290 // CHECK1-NEXT: [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16 1291 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]] 1292 // CHECK1-NEXT: store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16 1293 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8* 1294 // CHECK1-NEXT: [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8* 1295 // CHECK1-NEXT: [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8* 1296 // CHECK1-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i64 noundef 16, i8* noundef [[TMP17]], i8* noundef [[TMP18]], i8* noundef [[TMP19]], i32 noundef signext 0, i32 noundef signext 0) 1297 // CHECK1-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]] 1298 // CHECK1: atomic_exit: 1299 // CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1300 // CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1301 // CHECK1: .omp.reduction.default: 1302 // CHECK1-NEXT: ret void 1303 // 1304 // 1305 // CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.29 1306 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 1307 // CHECK1-NEXT: entry: 1308 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1309 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 1310 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1311 // CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 1312 // CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 1313 // CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 1314 // CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 1315 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 1316 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 1317 // 
CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 1318 // CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128* 1319 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 1320 // CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 1321 // CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128* 1322 // CHECK1-NEXT: [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16 1323 // CHECK1-NEXT: [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16 1324 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]] 1325 // CHECK1-NEXT: store i128 [[ADD]], i128* [[TMP11]], align 16 1326 // CHECK1-NEXT: ret void 1327 // 1328 // 1329 // CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 1330 // CHECK1-SAME: () #[[ATTR7:[0-9]+]] { 1331 // CHECK1-NEXT: entry: 1332 // CHECK1-NEXT: call void @__tgt_register_requires(i64 1) 1333 // CHECK1-NEXT: ret void 1334 // 1335 // 1336 // CHECK3-LABEL: define {{[^@]+}}@_Z14mapWithPrivatev 1337 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] { 1338 // CHECK3-NEXT: entry: 1339 // CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4 1340 // CHECK3-NEXT: [[Y:%.*]] = alloca i32, align 4 1341 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4 1342 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4 1343 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4 1344 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1345 // CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32** 1346 // CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4 1347 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1348 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32** 1349 // CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4 1350 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1351 // CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4 1352 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1353 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32** 1354 // CHECK3-NEXT: store i32* [[Y]], i32** [[TMP6]], align 4 1355 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1356 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32** 1357 // CHECK3-NEXT: store i32* [[Y]], i32** [[TMP8]], align 4 1358 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 1359 // CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4 1360 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1361 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1362 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 1363 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 1364 // CHECK3-NEXT: store i32 1, i32* [[TMP12]], align 4 1365 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 1366 // CHECK3-NEXT: store i32 2, i32* [[TMP13]], align 4 1367 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 1368 // CHECK3-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 4 1369 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 1370 // CHECK3-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 4 1371 // 
CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 1372 // CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP16]], align 4 1373 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 1374 // CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP17]], align 4 1375 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 1376 // CHECK3-NEXT: store i8** null, i8*** [[TMP18]], align 4 1377 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 1378 // CHECK3-NEXT: store i8** null, i8*** [[TMP19]], align 4 1379 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 1380 // CHECK3-NEXT: store i64 0, i64* [[TMP20]], align 8 1381 // CHECK3-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 1382 // CHECK3-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 1383 // CHECK3-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1384 // CHECK3: omp_offload.failed: 1385 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27() #[[ATTR2:[0-9]+]] 1386 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 1387 // CHECK3: omp_offload.cont: 1388 // CHECK3-NEXT: ret void 1389 // 1390 // 1391 // CHECK3-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27 1392 // CHECK3-SAME: () #[[ATTR1:[0-9]+]] { 1393 // CHECK3-NEXT: entry: 1394 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 1395 // CHECK3-NEXT: ret void 1396 // 1397 // 1398 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined. 1399 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] { 1400 // CHECK3-NEXT: entry: 1401 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1402 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1403 // CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4 1404 // CHECK3-NEXT: [[Y:%.*]] = alloca i32, align 4 1405 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1406 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1407 // CHECK3-NEXT: ret void 1408 // 1409 // 1410 // CHECK3-LABEL: define {{[^@]+}}@_Z19mapWithFirstprivatev 1411 // CHECK3-SAME: () #[[ATTR0]] { 1412 // CHECK3-NEXT: entry: 1413 // CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4 1414 // CHECK3-NEXT: [[Y:%.*]] = alloca i32, align 4 1415 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4 1416 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4 1417 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4 1418 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1419 // CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32** 1420 // CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4 1421 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1422 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to 
i32** 1423 // CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4 1424 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1425 // CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4 1426 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1427 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32** 1428 // CHECK3-NEXT: store i32* [[Y]], i32** [[TMP6]], align 4 1429 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1430 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32** 1431 // CHECK3-NEXT: store i32* [[Y]], i32** [[TMP8]], align 4 1432 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 1433 // CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4 1434 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1435 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1436 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 1437 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 1438 // CHECK3-NEXT: store i32 1, i32* [[TMP12]], align 4 1439 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 1440 // CHECK3-NEXT: store i32 2, i32* [[TMP13]], align 4 1441 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 1442 // CHECK3-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 4 1443 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 1444 // CHECK3-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 4 1445 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 1446 // CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP16]], align 4 1447 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 1448 // CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP17]], align 4 1449 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 1450 // CHECK3-NEXT: store i8** null, i8*** [[TMP18]], align 4 1451 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 1452 // CHECK3-NEXT: store i8** null, i8*** [[TMP19]], align 4 1453 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 1454 // CHECK3-NEXT: store i64 0, i64* [[TMP20]], align 8 1455 // CHECK3-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 1456 // CHECK3-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 1457 // CHECK3-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1458 // CHECK3: omp_offload.failed: 1459 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33(i32* [[X]], i32* [[Y]]) 
#[[ATTR2]] 1460 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 1461 // CHECK3: omp_offload.cont: 1462 // CHECK3-NEXT: ret void 1463 // 1464 // 1465 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33 1466 // CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] { 1467 // CHECK3-NEXT: entry: 1468 // CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 1469 // CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4 1470 // CHECK3-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4 1471 // CHECK3-NEXT: [[Y_CASTED:%.*]] = alloca i32, align 4 1472 // CHECK3-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4 1473 // CHECK3-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4 1474 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 1475 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4 1476 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4 1477 // CHECK3-NEXT: store i32 [[TMP2]], i32* [[X_CASTED]], align 4 1478 // CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[X_CASTED]], align 4 1479 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4 1480 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[Y_CASTED]], align 4 1481 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[Y_CASTED]], align 4 1482 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP5]]) 1483 // CHECK3-NEXT: ret void 1484 // 1485 // 1486 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1 1487 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) #[[ATTR1]] { 1488 // CHECK3-NEXT: entry: 1489 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1490 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1491 // CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4 1492 // CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca i32, align 4 1493 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1494 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1495 // CHECK3-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4 1496 // CHECK3-NEXT: store i32 [[Y]], i32* [[Y_ADDR]], align 4 1497 // CHECK3-NEXT: ret void 1498 // 1499 // 1500 // CHECK3-LABEL: define {{[^@]+}}@_Z16mapWithReductionv 1501 // CHECK3-SAME: () #[[ATTR0]] { 1502 // CHECK3-NEXT: entry: 1503 // CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4 1504 // CHECK3-NEXT: [[Y:%.*]] = alloca i32, align 4 1505 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4 1506 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4 1507 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4 1508 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1509 // CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32** 1510 // CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4 1511 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1512 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32** 1513 // 
CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4 1514 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1515 // CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4 1516 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1517 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32** 1518 // CHECK3-NEXT: store i32* [[Y]], i32** [[TMP6]], align 4 1519 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1520 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32** 1521 // CHECK3-NEXT: store i32* [[Y]], i32** [[TMP8]], align 4 1522 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 1523 // CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4 1524 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1525 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1526 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 1527 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 1528 // CHECK3-NEXT: store i32 1, i32* [[TMP12]], align 4 1529 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 1530 // CHECK3-NEXT: store i32 2, i32* [[TMP13]], align 4 1531 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 1532 // CHECK3-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 4 1533 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], 
%struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 1534 // CHECK3-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 4 1535 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 1536 // CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP16]], align 4 1537 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 1538 // CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP17]], align 4 1539 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 1540 // CHECK3-NEXT: store i8** null, i8*** [[TMP18]], align 4 1541 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 1542 // CHECK3-NEXT: store i8** null, i8*** [[TMP19]], align 4 1543 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 1544 // CHECK3-NEXT: store i64 0, i64* [[TMP20]], align 8 1545 // CHECK3-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 1546 // CHECK3-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0 1547 // CHECK3-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1548 // CHECK3: omp_offload.failed: 1549 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39(i32* [[X]], i32* [[Y]]) #[[ATTR2]] 1550 // CHECK3-NEXT: br label 
[[OMP_OFFLOAD_CONT]] 1551 // CHECK3: omp_offload.cont: 1552 // CHECK3-NEXT: ret void 1553 // 1554 // 1555 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39 1556 // CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] { 1557 // CHECK3-NEXT: entry: 1558 // CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 1559 // CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4 1560 // CHECK3-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4 1561 // CHECK3-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4 1562 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 1563 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4 1564 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]]) 1565 // CHECK3-NEXT: ret void 1566 // 1567 // 1568 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4 1569 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] { 1570 // CHECK3-NEXT: entry: 1571 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1572 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1573 // CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 1574 // CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4 1575 // CHECK3-NEXT: [[X1:%.*]] = alloca i32, align 4 1576 // CHECK3-NEXT: [[Y2:%.*]] = alloca i32, align 4 1577 // CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4 1578 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1579 // CHECK3-NEXT: 
store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1580 // CHECK3-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4 1581 // CHECK3-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4 1582 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 1583 // CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4 1584 // CHECK3-NEXT: store i32 0, i32* [[X1]], align 4 1585 // CHECK3-NEXT: store i32 0, i32* [[Y2]], align 4 1586 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 1587 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i32* [[X1]] to i8* 1588 // CHECK3-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 4 1589 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1 1590 // CHECK3-NEXT: [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8* 1591 // CHECK3-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 4 1592 // CHECK3-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 1593 // CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4 1594 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 1595 // CHECK3-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i32 8, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) 1596 // CHECK3-NEXT: switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 1597 // CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 1598 // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 1599 // CHECK3-NEXT: ] 1600 // CHECK3: .omp.reduction.case1: 1601 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4 1602 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[X1]], align 4 1603 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] 1604 // CHECK3-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4 1605 // 
CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4 1606 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4 1607 // CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 1608 // CHECK3-NEXT: store i32 [[ADD3]], i32* [[TMP1]], align 4 1609 // CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1610 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1611 // CHECK3: .omp.reduction.case2: 1612 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[X1]], align 4 1613 // CHECK3-NEXT: [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4 1614 // CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4 1615 // CHECK3-NEXT: [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4 1616 // CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var) 1617 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 1618 // CHECK3: .omp.reduction.default: 1619 // CHECK3-NEXT: ret void 1620 // 1621 // 1622 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func 1623 // CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] { 1624 // CHECK3-NEXT: entry: 1625 // CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 1626 // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 1627 // CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 1628 // CHECK3-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 1629 // CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 1630 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]* 1631 // CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 1632 // CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]* 1633 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0 1634 // CHECK3-NEXT: [[TMP7:%.*]] = load 
i8*, i8** [[TMP6]], align 4 1635 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 1636 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 0 1637 // CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 1638 // CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 1639 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1 1640 // CHECK3-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4 1641 // CHECK3-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32* 1642 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 1 1643 // CHECK3-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4 1644 // CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32* 1645 // CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4 1646 // CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4 1647 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] 1648 // CHECK3-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 1649 // CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4 1650 // CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4 1651 // CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] 1652 // CHECK3-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4 1653 // CHECK3-NEXT: ret void 1654 // 1655 // 1656 // CHECK3-LABEL: define {{[^@]+}}@_Z7mapFromv 1657 // CHECK3-SAME: () #[[ATTR0]] { 1658 // CHECK3-NEXT: entry: 1659 // CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4 1660 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4 1661 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4 1662 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4 1663 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1664 // CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32** 
1665 // CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4 1666 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1667 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32** 1668 // CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4 1669 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1670 // CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4 1671 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1672 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1673 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 1674 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 1675 // CHECK3-NEXT: store i32 1, i32* [[TMP7]], align 4 1676 // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 1677 // CHECK3-NEXT: store i32 1, i32* [[TMP8]], align 4 1678 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 1679 // CHECK3-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 4 1680 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 1681 // CHECK3-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 4 1682 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 1683 // CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64** [[TMP11]], align 4 1684 // 
CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 1685 // CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP12]], align 4 1686 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 1687 // CHECK3-NEXT: store i8** null, i8*** [[TMP13]], align 4 1688 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 1689 // CHECK3-NEXT: store i8** null, i8*** [[TMP14]], align 4 1690 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 1691 // CHECK3-NEXT: store i64 0, i64* [[TMP15]], align 8 1692 // CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 1693 // CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 1694 // CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1695 // CHECK3: omp_offload.failed: 1696 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45(i32* [[X]]) #[[ATTR2]] 1697 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 1698 // CHECK3: omp_offload.cont: 1699 // CHECK3-NEXT: ret void 1700 // 1701 // 1702 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45 1703 // CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] { 1704 // CHECK3-NEXT: entry: 1705 // CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 1706 // CHECK3-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4 1707 // CHECK3-NEXT: store i32* 
[[X]], i32** [[X_ADDR]], align 4 1708 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 1709 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 1710 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4 1711 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4 1712 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP2]]) 1713 // CHECK3-NEXT: ret void 1714 // 1715 // 1716 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7 1717 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR1]] { 1718 // CHECK3-NEXT: entry: 1719 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1720 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1721 // CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4 1722 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1723 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1724 // CHECK3-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4 1725 // CHECK3-NEXT: ret void 1726 // 1727 // 1728 // CHECK3-LABEL: define {{[^@]+}}@_Z5mapTov 1729 // CHECK3-SAME: () #[[ATTR0]] { 1730 // CHECK3-NEXT: entry: 1731 // CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4 1732 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4 1733 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4 1734 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4 1735 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1736 // CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32** 1737 // CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4 1738 // 
CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1739 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32** 1740 // CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4 1741 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1742 // CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4 1743 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1744 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1745 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 1746 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 1747 // CHECK3-NEXT: store i32 1, i32* [[TMP7]], align 4 1748 // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 1749 // CHECK3-NEXT: store i32 1, i32* [[TMP8]], align 4 1750 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 1751 // CHECK3-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 4 1752 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 1753 // CHECK3-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 4 1754 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 1755 // CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64** [[TMP11]], align 4 1756 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds 
[[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 1757 // CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP12]], align 4 1758 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 1759 // CHECK3-NEXT: store i8** null, i8*** [[TMP13]], align 4 1760 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 1761 // CHECK3-NEXT: store i8** null, i8*** [[TMP14]], align 4 1762 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 1763 // CHECK3-NEXT: store i64 0, i64* [[TMP15]], align 8 1764 // CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 1765 // CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 1766 // CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1767 // CHECK3: omp_offload.failed: 1768 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51(i32* [[X]]) #[[ATTR2]] 1769 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 1770 // CHECK3: omp_offload.cont: 1771 // CHECK3-NEXT: ret void 1772 // 1773 // 1774 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51 1775 // CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] { 1776 // CHECK3-NEXT: entry: 1777 // CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 1778 // CHECK3-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4 1779 // CHECK3-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4 1780 // CHECK3-NEXT: 
[[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 1781 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 1782 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4 1783 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4 1784 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP2]]) 1785 // CHECK3-NEXT: ret void 1786 // 1787 // 1788 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10 1789 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR1]] { 1790 // CHECK3-NEXT: entry: 1791 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1792 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1793 // CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4 1794 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1795 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1796 // CHECK3-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4 1797 // CHECK3-NEXT: ret void 1798 // 1799 // 1800 // CHECK3-LABEL: define {{[^@]+}}@_Z8mapAllocv 1801 // CHECK3-SAME: () #[[ATTR0]] { 1802 // CHECK3-NEXT: entry: 1803 // CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4 1804 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4 1805 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4 1806 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4 1807 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1808 // CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32** 1809 // CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4 1810 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x 
i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1811 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32** 1812 // CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4 1813 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1814 // CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4 1815 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1816 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1817 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 1818 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 1819 // CHECK3-NEXT: store i32 1, i32* [[TMP7]], align 4 1820 // CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 1821 // CHECK3-NEXT: store i32 1, i32* [[TMP8]], align 4 1822 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 1823 // CHECK3-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 4 1824 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 1825 // CHECK3-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 4 1826 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 1827 // CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64** [[TMP11]], align 4 1828 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* 
[[KERNEL_ARGS]], i32 0, i32 5 1829 // CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i64** [[TMP12]], align 4 1830 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 1831 // CHECK3-NEXT: store i8** null, i8*** [[TMP13]], align 4 1832 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 1833 // CHECK3-NEXT: store i8** null, i8*** [[TMP14]], align 4 1834 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 1835 // CHECK3-NEXT: store i64 0, i64* [[TMP15]], align 8 1836 // CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 1837 // CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 1838 // CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1839 // CHECK3: omp_offload.failed: 1840 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57(i32* [[X]]) #[[ATTR2]] 1841 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 1842 // CHECK3: omp_offload.cont: 1843 // CHECK3-NEXT: ret void 1844 // 1845 // 1846 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57 1847 // CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] { 1848 // CHECK3-NEXT: entry: 1849 // CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 1850 // CHECK3-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4 1851 // CHECK3-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4 1852 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 1853 // 
CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 1854 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4 1855 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4 1856 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]]) 1857 // CHECK3-NEXT: ret void 1858 // 1859 // 1860 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13 1861 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR1]] { 1862 // CHECK3-NEXT: entry: 1863 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 1864 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 1865 // CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4 1866 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 1867 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 1868 // CHECK3-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4 1869 // CHECK3-NEXT: ret void 1870 // 1871 // 1872 // CHECK3-LABEL: define {{[^@]+}}@_Z8mapArrayv 1873 // CHECK3-SAME: () #[[ATTR0]] { 1874 // CHECK3-NEXT: entry: 1875 // CHECK3-NEXT: [[X:%.*]] = alloca [77 x i32], align 4 1876 // CHECK3-NEXT: [[Y:%.*]] = alloca [88 x i32], align 4 1877 // CHECK3-NEXT: [[Z:%.*]] = alloca [99 x i32], align 4 1878 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 1879 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 1880 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 1881 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 4 1882 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 4 1883 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 4 1884 // 
CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1885 // CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [88 x i32]** 1886 // CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP1]], align 4 1887 // CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1888 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [88 x i32]** 1889 // CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP3]], align 4 1890 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 1891 // CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4 1892 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 1893 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [99 x i32]** 1894 // CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP6]], align 4 1895 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 1896 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [99 x i32]** 1897 // CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP8]], align 4 1898 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 1899 // CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4 1900 // CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 1901 // CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to [77 x i32]** 1902 // CHECK3-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP11]], align 4 1903 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 1904 // CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to [77 x i32]** 1905 // CHECK3-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP13]], align 4 1906 // CHECK3-NEXT: [[TMP14:%.*]] 
= getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 1907 // CHECK3-NEXT: store i8* null, i8** [[TMP14]], align 4 1908 // CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 1909 // CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 1910 // CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8 1911 // CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0 1912 // CHECK3-NEXT: store i32 1, i32* [[TMP17]], align 4 1913 // CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1 1914 // CHECK3-NEXT: store i32 3, i32* [[TMP18]], align 4 1915 // CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2 1916 // CHECK3-NEXT: store i8** [[TMP15]], i8*** [[TMP19]], align 4 1917 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3 1918 // CHECK3-NEXT: store i8** [[TMP16]], i8*** [[TMP20]], align 4 1919 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4 1920 // CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.18, i32 0, i32 0), i64** [[TMP21]], align 4 1921 // CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5 1922 // CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.19, i32 0, i32 0), i64** [[TMP22]], align 4 1923 // CHECK3-NEXT: [[TMP23:%.*]] = 
getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6 1924 // CHECK3-NEXT: store i8** null, i8*** [[TMP23]], align 4 1925 // CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7 1926 // CHECK3-NEXT: store i8** null, i8*** [[TMP24]], align 4 1927 // CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8 1928 // CHECK3-NEXT: store i64 0, i64* [[TMP25]], align 8 1929 // CHECK3-NEXT: [[TMP26:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]]) 1930 // CHECK3-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0 1931 // CHECK3-NEXT: br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] 1932 // CHECK3: omp_offload.failed: 1933 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]] 1934 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] 1935 // CHECK3: omp_offload.cont: 1936 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 1937 // CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to [88 x i32]** 1938 // CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP29]], align 4 1939 // CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 1940 // CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to [88 x i32]** 1941 // CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP31]], align 4 1942 // CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0 1943 // CHECK3-NEXT: store i8* null, i8** 
[[TMP32]], align 4 1944 // CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1 1945 // CHECK3-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to [99 x i32]** 1946 // CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP34]], align 4 1947 // CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1 1948 // CHECK3-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [99 x i32]** 1949 // CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP36]], align 4 1950 // CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 1 1951 // CHECK3-NEXT: store i8* null, i8** [[TMP37]], align 4 1952 // CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2 1953 // CHECK3-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [77 x i32]** 1954 // CHECK3-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP39]], align 4 1955 // CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2 1956 // CHECK3-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to [77 x i32]** 1957 // CHECK3-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP41]], align 4 1958 // CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 2 1959 // CHECK3-NEXT: store i8* null, i8** [[TMP42]], align 4 1960 // CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 1961 // CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 1962 // CHECK3-NEXT: [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8 1963 // CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0 1964 // CHECK3-NEXT: store i32 1, 
i32* [[TMP45]], align 4 1965 // CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1 1966 // CHECK3-NEXT: store i32 3, i32* [[TMP46]], align 4 1967 // CHECK3-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2 1968 // CHECK3-NEXT: store i8** [[TMP43]], i8*** [[TMP47]], align 4 1969 // CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3 1970 // CHECK3-NEXT: store i8** [[TMP44]], i8*** [[TMP48]], align 4 1971 // CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4 1972 // CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.22, i32 0, i32 0), i64** [[TMP49]], align 4 1973 // CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5 1974 // CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.23, i32 0, i32 0), i64** [[TMP50]], align 4 1975 // CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6 1976 // CHECK3-NEXT: store i8** null, i8*** [[TMP51]], align 4 1977 // CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7 1978 // CHECK3-NEXT: store i8** null, i8*** [[TMP52]], align 4 1979 // CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8 1980 // CHECK3-NEXT: store i64 0, i64* [[TMP53]], align 8 1981 // CHECK3-NEXT: [[TMP54:%.*]] = call i32 
@__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]]) 1982 // CHECK3-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 1983 // CHECK3-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] 1984 // CHECK3: omp_offload.failed5: 1985 // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]] 1986 // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT6]] 1987 // CHECK3: omp_offload.cont6: 1988 // CHECK3-NEXT: ret void 1989 // 1990 // 1991 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63 1992 // CHECK3-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] { 1993 // CHECK3-NEXT: entry: 1994 // CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4 1995 // CHECK3-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4 1996 // CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4 1997 // CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4 1998 // CHECK3-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4 1999 // CHECK3-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4 2000 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]]) 2001 // CHECK3-NEXT: ret void 2002 // 2003 // 2004 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16 2005 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] { 2006 // CHECK3-NEXT: entry: 2007 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 2008 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 2009 // CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4 2010 // CHECK3-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4 2011 // CHECK3-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4 2012 // CHECK3-NEXT: [[X:%.*]] = alloca [77 x i32], align 4 2013 // CHECK3-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4 2014 // CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 2015 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 2016 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 2017 // CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4 2018 // CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4 2019 // CHECK3-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4 2020 // CHECK3-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4 2021 // CHECK3-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8* 2022 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8* 2023 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false) 2024 // CHECK3-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds 
[99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0 2025 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99 2026 // CHECK3-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]] 2027 // CHECK3-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 2028 // CHECK3: omp.arrayinit.body: 2029 // CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 2030 // CHECK3-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2031 // CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 2032 // CHECK3-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]] 2033 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 2034 // CHECK3: omp.arrayinit.done: 2035 // CHECK3-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32* 2036 // CHECK3-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32* 2037 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 2038 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 2039 // CHECK3-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4 2040 // CHECK3-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 2041 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 2042 // CHECK3-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 2043 // CHECK3-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.17, [8 x i32]* @.gomp_critical_user_.reduction.var) 2044 // CHECK3-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 2045 // CHECK3-NEXT: i32 1, label 
[[DOTOMP_REDUCTION_CASE1:%.*]] 2046 // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 2047 // CHECK3-NEXT: ] 2048 // CHECK3: .omp.reduction.case1: 2049 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99 2050 // CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]] 2051 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 2052 // CHECK3: omp.arraycpy.body: 2053 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2054 // CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2055 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 2056 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 2057 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 2058 // CHECK3-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 2059 // CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1 2060 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 2061 // CHECK3-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]] 2062 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] 2063 // CHECK3: omp.arraycpy.done6: 2064 // CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2065 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2066 // CHECK3: .omp.reduction.case2: 2067 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99 2068 // 
CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]] 2069 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]] 2070 // CHECK3: omp.arraycpy.body8: 2071 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 2072 // CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 2073 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4 2074 // CHECK3-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4 2075 // CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1 2076 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1 2077 // CHECK3-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]] 2078 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]] 2079 // CHECK3: omp.arraycpy.done14: 2080 // CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2081 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2082 // CHECK3: .omp.reduction.default: 2083 // CHECK3-NEXT: ret void 2084 // 2085 // 2086 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.17 2087 // CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 2088 // CHECK3-NEXT: entry: 2089 // CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 2090 // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 2091 // CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 2092 // CHECK3-NEXT: 
store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 2093 // CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 2094 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 2095 // CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 2096 // CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 2097 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 2098 // CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 2099 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 2100 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 2101 // CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 2102 // CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 2103 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99 2104 // CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] 2105 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 2106 // CHECK3: omp.arraycpy.body: 2107 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2108 // CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2109 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2110 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 2111 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 2112 // CHECK3-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2113 // CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 2114 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* 
[[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 2115 // CHECK3-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] 2116 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 2117 // CHECK3: omp.arraycpy.done2: 2118 // CHECK3-NEXT: ret void 2119 // 2120 // 2121 // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65 2122 // CHECK3-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] { 2123 // CHECK3-NEXT: entry: 2124 // CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4 2125 // CHECK3-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4 2126 // CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4 2127 // CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4 2128 // CHECK3-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4 2129 // CHECK3-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4 2130 // CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..20 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]]) 2131 // CHECK3-NEXT: ret void 2132 // 2133 // 2134 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..20 2135 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] { 2136 // CHECK3-NEXT: entry: 2137 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 2138 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 2139 // CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4 2140 // CHECK3-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4 2141 // CHECK3-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4 2142 // CHECK3-NEXT: [[X:%.*]] = alloca [77 x i32], align 4 2143 // CHECK3-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4 2144 // CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 2145 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 2146 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 2147 // CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4 2148 // CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4 2149 // CHECK3-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4 2150 // CHECK3-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4 2151 // CHECK3-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8* 2152 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8* 2153 // CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false) 2154 // CHECK3-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds 
[99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0 2155 // CHECK3-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99 2156 // CHECK3-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]] 2157 // CHECK3-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 2158 // CHECK3: omp.arrayinit.body: 2159 // CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 2160 // CHECK3-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2161 // CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 2162 // CHECK3-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]] 2163 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 2164 // CHECK3: omp.arrayinit.done: 2165 // CHECK3-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32* 2166 // CHECK3-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32* 2167 // CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 2168 // CHECK3-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 2169 // CHECK3-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4 2170 // CHECK3-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 2171 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 2172 // CHECK3-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 2173 // CHECK3-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.21, [8 x i32]* @.gomp_critical_user_.reduction.var) 2174 // CHECK3-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 2175 // CHECK3-NEXT: i32 1, label 
[[DOTOMP_REDUCTION_CASE1:%.*]] 2176 // CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 2177 // CHECK3-NEXT: ] 2178 // CHECK3: .omp.reduction.case1: 2179 // CHECK3-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99 2180 // CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]] 2181 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 2182 // CHECK3: omp.arraycpy.body: 2183 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2184 // CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2185 // CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 2186 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 2187 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 2188 // CHECK3-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 2189 // CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1 2190 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 2191 // CHECK3-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]] 2192 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] 2193 // CHECK3: omp.arraycpy.done6: 2194 // CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2195 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2196 // CHECK3: .omp.reduction.case2: 2197 // CHECK3-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99 2198 // 
CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]] 2199 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]] 2200 // CHECK3: omp.arraycpy.body8: 2201 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 2202 // CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 2203 // CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4 2204 // CHECK3-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4 2205 // CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1 2206 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1 2207 // CHECK3-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]] 2208 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]] 2209 // CHECK3: omp.arraycpy.done14: 2210 // CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2211 // CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2212 // CHECK3: .omp.reduction.default: 2213 // CHECK3-NEXT: ret void 2214 // 2215 // 2216 // CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.21 2217 // CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { 2218 // CHECK3-NEXT: entry: 2219 // CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 2220 // CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 2221 // CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 2222 // CHECK3-NEXT: 
store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 2223 // CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 2224 // CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 2225 // CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 2226 // CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 2227 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 2228 // CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 2229 // CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 2230 // CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 2231 // CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 2232 // CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 2233 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99 2234 // CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] 2235 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 2236 // CHECK3: omp.arraycpy.body: 2237 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2238 // CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2239 // CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2240 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 2241 // CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 2242 // CHECK3-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2243 // CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 2244 // CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* 
[[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 2245 // CHECK3-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] 2246 // CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 2247 // CHECK3: omp.arraycpy.done2: 2248 // CHECK3-NEXT: ret void 2249 // 2250 // 2251 // CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg 2252 // CHECK3-SAME: () #[[ATTR6:[0-9]+]] { 2253 // CHECK3-NEXT: entry: 2254 // CHECK3-NEXT: call void @__tgt_register_requires(i64 1) 2255 // CHECK3-NEXT: ret void 2256 // 2257 // 2258 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27 2259 // CHECK5-SAME: () #[[ATTR0:[0-9]+]] { 2260 // CHECK5-NEXT: entry: 2261 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 2262 // CHECK5-NEXT: ret void 2263 // 2264 // 2265 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined. 
2266 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 2267 // CHECK5-NEXT: entry: 2268 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2269 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2270 // CHECK5-NEXT: [[X:%.*]] = alloca i32, align 4 2271 // CHECK5-NEXT: [[Y:%.*]] = alloca i32, align 4 2272 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2273 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2274 // CHECK5-NEXT: ret void 2275 // 2276 // 2277 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33 2278 // CHECK5-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] { 2279 // CHECK5-NEXT: entry: 2280 // CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 2281 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8 2282 // CHECK5-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8 2283 // CHECK5-NEXT: [[Y_CASTED:%.*]] = alloca i64, align 8 2284 // CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 2285 // CHECK5-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8 2286 // CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 2287 // CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8 2288 // CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4 2289 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32* 2290 // CHECK5-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4 2291 // CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[X_CASTED]], align 8 2292 // CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4 2293 // CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[Y_CASTED]] to i32* 2294 // CHECK5-NEXT: store i32 [[TMP4]], i32* [[CONV1]], align 4 2295 // CHECK5-NEXT: [[TMP5:%.*]] = load i64, i64* [[Y_CASTED]], align 8 2296 // 
CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP5]]) 2297 // CHECK5-NEXT: ret void 2298 // 2299 // 2300 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..1 2301 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]], i64 noundef [[Y:%.*]]) #[[ATTR0]] { 2302 // CHECK5-NEXT: entry: 2303 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2304 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2305 // CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8 2306 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i64, align 8 2307 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2308 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2309 // CHECK5-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8 2310 // CHECK5-NEXT: store i64 [[Y]], i64* [[Y_ADDR]], align 8 2311 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32* 2312 // CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[Y_ADDR]] to i32* 2313 // CHECK5-NEXT: ret void 2314 // 2315 // 2316 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39 2317 // CHECK5-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] { 2318 // CHECK5-NEXT: entry: 2319 // CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 2320 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8 2321 // CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 2322 // CHECK5-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8 2323 // CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 2324 // CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8 
2325 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]]) 2326 // CHECK5-NEXT: ret void 2327 // 2328 // 2329 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..2 2330 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] { 2331 // CHECK5-NEXT: entry: 2332 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2333 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2334 // CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 2335 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8 2336 // CHECK5-NEXT: [[X1:%.*]] = alloca i32, align 4 2337 // CHECK5-NEXT: [[Y2:%.*]] = alloca i32, align 4 2338 // CHECK5-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8 2339 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2340 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2341 // CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 2342 // CHECK5-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8 2343 // CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 2344 // CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8 2345 // CHECK5-NEXT: store i32 0, i32* [[X1]], align 4 2346 // CHECK5-NEXT: store i32 0, i32* [[Y2]], align 4 2347 // CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 2348 // CHECK5-NEXT: [[TMP3:%.*]] = bitcast i32* [[X1]] to i8* 2349 // CHECK5-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 8 2350 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x 
i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 2351 // CHECK5-NEXT: [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8* 2352 // CHECK5-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8 2353 // CHECK5-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2354 // CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4 2355 // CHECK5-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 2356 // CHECK5-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i64 16, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) 2357 // CHECK5-NEXT: switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 2358 // CHECK5-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 2359 // CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 2360 // CHECK5-NEXT: ] 2361 // CHECK5: .omp.reduction.case1: 2362 // CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4 2363 // CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[X1]], align 4 2364 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] 2365 // CHECK5-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4 2366 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4 2367 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4 2368 // CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 2369 // CHECK5-NEXT: store i32 [[ADD3]], i32* [[TMP1]], align 4 2370 // CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2371 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2372 // CHECK5: .omp.reduction.case2: 2373 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[X1]], align 4 2374 // CHECK5-NEXT: [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4 2375 // CHECK5-NEXT: [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4 2376 // CHECK5-NEXT: [[TMP17:%.*]] = atomicrmw add 
i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4 2377 // CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2378 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2379 // CHECK5: .omp.reduction.default: 2380 // CHECK5-NEXT: ret void 2381 // 2382 // 2383 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func 2384 // CHECK5-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] { 2385 // CHECK5-NEXT: entry: 2386 // CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 2387 // CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 2388 // CHECK5-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 2389 // CHECK5-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 2390 // CHECK5-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 2391 // CHECK5-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]* 2392 // CHECK5-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 2393 // CHECK5-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]* 2394 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0 2395 // CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 2396 // CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 2397 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0 2398 // CHECK5-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 2399 // CHECK5-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 2400 // CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1 2401 // CHECK5-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8 2402 // CHECK5-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32* 2403 // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1 2404 // CHECK5-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8 2405 // CHECK5-NEXT: [[TMP17:%.*]] 
= bitcast i8* [[TMP16]] to i32* 2406 // CHECK5-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4 2407 // CHECK5-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4 2408 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] 2409 // CHECK5-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 2410 // CHECK5-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4 2411 // CHECK5-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4 2412 // CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] 2413 // CHECK5-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4 2414 // CHECK5-NEXT: ret void 2415 // 2416 // 2417 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45 2418 // CHECK5-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] { 2419 // CHECK5-NEXT: entry: 2420 // CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 2421 // CHECK5-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8 2422 // CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 2423 // CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 2424 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 2425 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32* 2426 // CHECK5-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4 2427 // CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8 2428 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP2]]) 2429 // CHECK5-NEXT: ret void 2430 // 2431 // 2432 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..3 2433 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR0]] { 2434 // CHECK5-NEXT: entry: 2435 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2436 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2437 // CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8 2438 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2439 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2440 // CHECK5-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8 2441 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32* 2442 // CHECK5-NEXT: ret void 2443 // 2444 // 2445 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51 2446 // CHECK5-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] { 2447 // CHECK5-NEXT: entry: 2448 // CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 2449 // CHECK5-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8 2450 // CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 2451 // CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 2452 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 2453 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32* 2454 // CHECK5-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4 2455 // CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8 2456 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]]) 2457 // CHECK5-NEXT: ret void 2458 // 2459 // 2460 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..4 2461 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR0]] { 2462 // CHECK5-NEXT: entry: 2463 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2464 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2465 // CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8 2466 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2467 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2468 // CHECK5-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8 2469 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32* 2470 // CHECK5-NEXT: ret void 2471 // 2472 // 2473 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57 2474 // CHECK5-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] { 2475 // CHECK5-NEXT: entry: 2476 // CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8 2477 // CHECK5-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8 2478 // CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8 2479 // CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8 2480 // CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 2481 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32* 2482 // CHECK5-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4 2483 // CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8 2484 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP2]]) 2485 // CHECK5-NEXT: ret void 2486 // 2487 // 2488 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..5 2489 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR0]] { 2490 // CHECK5-NEXT: entry: 2491 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2492 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2493 // CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8 2494 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2495 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2496 // CHECK5-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8 2497 // CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32* 2498 // CHECK5-NEXT: ret void 2499 // 2500 // 2501 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63 2502 // CHECK5-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] { 2503 // CHECK5-NEXT: entry: 2504 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8 2505 // CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8 2506 // CHECK5-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8 2507 // CHECK5-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8 2508 // CHECK5-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8 2509 // CHECK5-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8 2510 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]]) 2511 // CHECK5-NEXT: ret void 2512 // 2513 // 2514 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..6 2515 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] { 2516 // CHECK5-NEXT: entry: 2517 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2518 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2519 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8 2520 // CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8 2521 // CHECK5-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4 2522 // CHECK5-NEXT: [[X:%.*]] = alloca [77 x i32], align 4 2523 // CHECK5-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4 2524 // CHECK5-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 2525 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2526 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2527 // CHECK5-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8 2528 // CHECK5-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8 2529 // CHECK5-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8 2530 // CHECK5-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8 2531 // CHECK5-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8* 2532 // CHECK5-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8* 2533 // CHECK5-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false) 2534 // CHECK5-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 
x i32], [99 x i32]* [[Z2]], i32 0, i32 0 2535 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99 2536 // CHECK5-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]] 2537 // CHECK5-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 2538 // CHECK5: omp.arrayinit.body: 2539 // CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 2540 // CHECK5-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2541 // CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 2542 // CHECK5-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]] 2543 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 2544 // CHECK5: omp.arrayinit.done: 2545 // CHECK5-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32* 2546 // CHECK5-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32* 2547 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 2548 // CHECK5-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 2549 // CHECK5-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8 2550 // CHECK5-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2551 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 2552 // CHECK5-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 2553 // CHECK5-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.7, [8 x i32]* @.gomp_critical_user_.reduction.var) 2554 // CHECK5-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 2555 // CHECK5-NEXT: i32 1, label 
[[DOTOMP_REDUCTION_CASE1:%.*]] 2556 // CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 2557 // CHECK5-NEXT: ] 2558 // CHECK5: .omp.reduction.case1: 2559 // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99 2560 // CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]] 2561 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 2562 // CHECK5: omp.arraycpy.body: 2563 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2564 // CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2565 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 2566 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 2567 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 2568 // CHECK5-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 2569 // CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1 2570 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 2571 // CHECK5-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]] 2572 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] 2573 // CHECK5: omp.arraycpy.done6: 2574 // CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2575 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2576 // CHECK5: .omp.reduction.case2: 2577 // CHECK5-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99 2578 // 
CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]] 2579 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]] 2580 // CHECK5: omp.arraycpy.body8: 2581 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 2582 // CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 2583 // CHECK5-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4 2584 // CHECK5-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4 2585 // CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1 2586 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1 2587 // CHECK5-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]] 2588 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]] 2589 // CHECK5: omp.arraycpy.done14: 2590 // CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2591 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2592 // CHECK5: .omp.reduction.default: 2593 // CHECK5-NEXT: ret void 2594 // 2595 // 2596 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.7 2597 // CHECK5-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] { 2598 // CHECK5-NEXT: entry: 2599 // CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 2600 // CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 2601 // CHECK5-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 2602 // CHECK5-NEXT: 
store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 2603 // CHECK5-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 2604 // CHECK5-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 2605 // CHECK5-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 2606 // CHECK5-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 2607 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 2608 // CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 2609 // CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 2610 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 2611 // CHECK5-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 2612 // CHECK5-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 2613 // CHECK5-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99 2614 // CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] 2615 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 2616 // CHECK5: omp.arraycpy.body: 2617 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2618 // CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2619 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2620 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 2621 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 2622 // CHECK5-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2623 // CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 2624 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* 
[[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 2625 // CHECK5-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] 2626 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 2627 // CHECK5: omp.arraycpy.done2: 2628 // CHECK5-NEXT: ret void 2629 // 2630 // 2631 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65 2632 // CHECK5-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] { 2633 // CHECK5-NEXT: entry: 2634 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8 2635 // CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8 2636 // CHECK5-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8 2637 // CHECK5-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8 2638 // CHECK5-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8 2639 // CHECK5-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8 2640 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]]) 2641 // CHECK5-NEXT: ret void 2642 // 2643 // 2644 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..8 2645 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] { 2646 // CHECK5-NEXT: entry: 2647 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2648 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2649 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8 2650 // CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8 2651 // CHECK5-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4 2652 // CHECK5-NEXT: [[X:%.*]] = alloca [77 x i32], align 4 2653 // CHECK5-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4 2654 // CHECK5-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 2655 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2656 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2657 // CHECK5-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8 2658 // CHECK5-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8 2659 // CHECK5-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8 2660 // CHECK5-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8 2661 // CHECK5-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8* 2662 // CHECK5-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8* 2663 // CHECK5-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false) 2664 // CHECK5-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 
x i32], [99 x i32]* [[Z2]], i32 0, i32 0 2665 // CHECK5-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99 2666 // CHECK5-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]] 2667 // CHECK5-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 2668 // CHECK5: omp.arrayinit.body: 2669 // CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 2670 // CHECK5-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2671 // CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 2672 // CHECK5-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]] 2673 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 2674 // CHECK5: omp.arrayinit.done: 2675 // CHECK5-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32* 2676 // CHECK5-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32* 2677 // CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 2678 // CHECK5-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 2679 // CHECK5-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8 2680 // CHECK5-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2681 // CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 2682 // CHECK5-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 2683 // CHECK5-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.9, [8 x i32]* @.gomp_critical_user_.reduction.var) 2684 // CHECK5-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 2685 // CHECK5-NEXT: i32 1, label 
[[DOTOMP_REDUCTION_CASE1:%.*]] 2686 // CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 2687 // CHECK5-NEXT: ] 2688 // CHECK5: .omp.reduction.case1: 2689 // CHECK5-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99 2690 // CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]] 2691 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 2692 // CHECK5: omp.arraycpy.body: 2693 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2694 // CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2695 // CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 2696 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 2697 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 2698 // CHECK5-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 2699 // CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1 2700 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 2701 // CHECK5-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]] 2702 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] 2703 // CHECK5: omp.arraycpy.done6: 2704 // CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2705 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2706 // CHECK5: .omp.reduction.case2: 2707 // CHECK5-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99 2708 // 
CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]] 2709 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]] 2710 // CHECK5: omp.arraycpy.body8: 2711 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 2712 // CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 2713 // CHECK5-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4 2714 // CHECK5-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4 2715 // CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1 2716 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1 2717 // CHECK5-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]] 2718 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]] 2719 // CHECK5: omp.arraycpy.done14: 2720 // CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2721 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2722 // CHECK5: .omp.reduction.default: 2723 // CHECK5-NEXT: ret void 2724 // 2725 // 2726 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.9 2727 // CHECK5-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] { 2728 // CHECK5-NEXT: entry: 2729 // CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 2730 // CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 2731 // CHECK5-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 2732 // CHECK5-NEXT: 
store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 2733 // CHECK5-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 2734 // CHECK5-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 2735 // CHECK5-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 2736 // CHECK5-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 2737 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 2738 // CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 2739 // CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 2740 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 2741 // CHECK5-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 2742 // CHECK5-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 2743 // CHECK5-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99 2744 // CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] 2745 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 2746 // CHECK5: omp.arraycpy.body: 2747 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2748 // CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 2749 // CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2750 // CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 2751 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 2752 // CHECK5-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 2753 // CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 2754 // CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* 
[[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 2755 // CHECK5-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] 2756 // CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 2757 // CHECK5: omp.arraycpy.done2: 2758 // CHECK5-NEXT: ret void 2759 // 2760 // 2761 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72 2762 // CHECK5-SAME: (i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] { 2763 // CHECK5-NEXT: entry: 2764 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8 2765 // CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8 2766 // CHECK5-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8 2767 // CHECK5-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8 2768 // CHECK5-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8 2769 // CHECK5-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8 2770 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]]) 2771 // CHECK5-NEXT: ret void 2772 // 2773 // 2774 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..10 2775 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] { 2776 // CHECK5-NEXT: entry: 2777 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2778 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2779 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8 2780 // CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8 2781 // CHECK5-NEXT: [[Y1:%.*]] = alloca i128, align 16 2782 // CHECK5-NEXT: [[X:%.*]] = alloca i128, align 16 2783 // CHECK5-NEXT: [[Z2:%.*]] = alloca i128, align 16 2784 // CHECK5-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 2785 // CHECK5-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16 2786 // CHECK5-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16 2787 // CHECK5-NEXT: [[TMP:%.*]] = alloca i128, align 16 2788 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2789 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2790 // CHECK5-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8 2791 // CHECK5-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8 2792 // CHECK5-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8 2793 // CHECK5-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8 2794 // CHECK5-NEXT: [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16 2795 // CHECK5-NEXT: store i128 [[TMP2]], i128* [[Y1]], align 16 2796 // CHECK5-NEXT: store i128 0, i128* [[Z2]], align 16 2797 // CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds 
[1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 2798 // CHECK5-NEXT: [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8* 2799 // CHECK5-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8 2800 // CHECK5-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2801 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 2802 // CHECK5-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 2803 // CHECK5-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.11, [8 x i32]* @.gomp_critical_user_.reduction.var) 2804 // CHECK5-NEXT: switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 2805 // CHECK5-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 2806 // CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 2807 // CHECK5-NEXT: ] 2808 // CHECK5: .omp.reduction.case1: 2809 // CHECK5-NEXT: [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16 2810 // CHECK5-NEXT: [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16 2811 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]] 2812 // CHECK5-NEXT: store i128 [[ADD]], i128* [[TMP1]], align 16 2813 // CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2814 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2815 // CHECK5: .omp.reduction.case2: 2816 // CHECK5-NEXT: [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16 2817 // CHECK5-NEXT: [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8* 2818 // CHECK5-NEXT: [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8* 2819 // CHECK5-NEXT: call void @__atomic_load(i64 noundef 16, i8* noundef [[TMP12]], i8* noundef [[TMP13]], i32 noundef signext 0) #[[ATTR6:[0-9]+]] 2820 // CHECK5-NEXT: br label [[ATOMIC_CONT:%.*]] 2821 // CHECK5: atomic_cont: 2822 // CHECK5-NEXT: [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16 2823 // 
CHECK5-NEXT: store i128 [[TMP14]], i128* [[TMP]], align 16 2824 // CHECK5-NEXT: [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16 2825 // CHECK5-NEXT: [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16 2826 // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]] 2827 // CHECK5-NEXT: store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16 2828 // CHECK5-NEXT: [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8* 2829 // CHECK5-NEXT: [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8* 2830 // CHECK5-NEXT: [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8* 2831 // CHECK5-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i64 noundef 16, i8* noundef [[TMP17]], i8* noundef [[TMP18]], i8* noundef [[TMP19]], i32 noundef signext 0, i32 noundef signext 0) #[[ATTR6]] 2832 // CHECK5-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]] 2833 // CHECK5: atomic_exit: 2834 // CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2835 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2836 // CHECK5: .omp.reduction.default: 2837 // CHECK5-NEXT: ret void 2838 // 2839 // 2840 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.11 2841 // CHECK5-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] { 2842 // CHECK5-NEXT: entry: 2843 // CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 2844 // CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 2845 // CHECK5-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 2846 // CHECK5-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 2847 // CHECK5-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 2848 // CHECK5-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 2849 // CHECK5-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 2850 // CHECK5-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 2851 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x 
i8*]* [[TMP5]], i64 0, i64 0 2852 // CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 2853 // CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128* 2854 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 2855 // CHECK5-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 2856 // CHECK5-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128* 2857 // CHECK5-NEXT: [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16 2858 // CHECK5-NEXT: [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16 2859 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]] 2860 // CHECK5-NEXT: store i128 [[ADD]], i128* [[TMP11]], align 16 2861 // CHECK5-NEXT: ret void 2862 // 2863 // 2864 // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74 2865 // CHECK5-SAME: (i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] { 2866 // CHECK5-NEXT: entry: 2867 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8 2868 // CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8 2869 // CHECK5-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8 2870 // CHECK5-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8 2871 // CHECK5-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8 2872 // CHECK5-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8 2873 // CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]]) 2874 // CHECK5-NEXT: ret void 2875 // 2876 // 2877 // CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..12 2878 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] { 2879 // CHECK5-NEXT: entry: 2880 // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 2881 // CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 2882 // CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8 2883 // CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8 2884 // CHECK5-NEXT: [[Y1:%.*]] = alloca i128, align 16 2885 // CHECK5-NEXT: [[X:%.*]] = alloca i128, align 16 2886 // CHECK5-NEXT: [[Z2:%.*]] = alloca i128, align 16 2887 // CHECK5-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 2888 // CHECK5-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16 2889 // CHECK5-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16 2890 // CHECK5-NEXT: [[TMP:%.*]] = alloca i128, align 16 2891 // CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 2892 // CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 2893 // CHECK5-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8 2894 // CHECK5-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8 2895 // CHECK5-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8 2896 // CHECK5-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8 2897 // CHECK5-NEXT: [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16 2898 // CHECK5-NEXT: store i128 [[TMP2]], i128* [[Y1]], align 16 2899 // CHECK5-NEXT: store i128 0, i128* [[Z2]], align 16 2900 // CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds 
[1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 2901 // CHECK5-NEXT: [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8* 2902 // CHECK5-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8 2903 // CHECK5-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 2904 // CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4 2905 // CHECK5-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 2906 // CHECK5-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.13, [8 x i32]* @.gomp_critical_user_.reduction.var) 2907 // CHECK5-NEXT: switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 2908 // CHECK5-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 2909 // CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 2910 // CHECK5-NEXT: ] 2911 // CHECK5: .omp.reduction.case1: 2912 // CHECK5-NEXT: [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16 2913 // CHECK5-NEXT: [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16 2914 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]] 2915 // CHECK5-NEXT: store i128 [[ADD]], i128* [[TMP1]], align 16 2916 // CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2917 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2918 // CHECK5: .omp.reduction.case2: 2919 // CHECK5-NEXT: [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16 2920 // CHECK5-NEXT: [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8* 2921 // CHECK5-NEXT: [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8* 2922 // CHECK5-NEXT: call void @__atomic_load(i64 noundef 16, i8* noundef [[TMP12]], i8* noundef [[TMP13]], i32 noundef signext 0) #[[ATTR6]] 2923 // CHECK5-NEXT: br label [[ATOMIC_CONT:%.*]] 2924 // CHECK5: atomic_cont: 2925 // CHECK5-NEXT: [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16 2926 // CHECK5-NEXT: 
store i128 [[TMP14]], i128* [[TMP]], align 16 2927 // CHECK5-NEXT: [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16 2928 // CHECK5-NEXT: [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16 2929 // CHECK5-NEXT: [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]] 2930 // CHECK5-NEXT: store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16 2931 // CHECK5-NEXT: [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8* 2932 // CHECK5-NEXT: [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8* 2933 // CHECK5-NEXT: [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8* 2934 // CHECK5-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i64 noundef 16, i8* noundef [[TMP17]], i8* noundef [[TMP18]], i8* noundef [[TMP19]], i32 noundef signext 0, i32 noundef signext 0) #[[ATTR6]] 2935 // CHECK5-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]] 2936 // CHECK5: atomic_exit: 2937 // CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var) 2938 // CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 2939 // CHECK5: .omp.reduction.default: 2940 // CHECK5-NEXT: ret void 2941 // 2942 // 2943 // CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.13 2944 // CHECK5-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] { 2945 // CHECK5-NEXT: entry: 2946 // CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 2947 // CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 2948 // CHECK5-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 2949 // CHECK5-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 2950 // CHECK5-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 2951 // CHECK5-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 2952 // CHECK5-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 2953 // CHECK5-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 2954 // CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], 
i64 0, i64 0 2955 // CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 2956 // CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128* 2957 // CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 2958 // CHECK5-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 2959 // CHECK5-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128* 2960 // CHECK5-NEXT: [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16 2961 // CHECK5-NEXT: [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16 2962 // CHECK5-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]] 2963 // CHECK5-NEXT: store i128 [[ADD]], i128* [[TMP11]], align 16 2964 // CHECK5-NEXT: ret void 2965 // 2966 // 2967 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27 2968 // CHECK7-SAME: () #[[ATTR0:[0-9]+]] { 2969 // CHECK7-NEXT: entry: 2970 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*)) 2971 // CHECK7-NEXT: ret void 2972 // 2973 // 2974 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined. 
2975 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] { 2976 // CHECK7-NEXT: entry: 2977 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 2978 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 2979 // CHECK7-NEXT: [[X:%.*]] = alloca i32, align 4 2980 // CHECK7-NEXT: [[Y:%.*]] = alloca i32, align 4 2981 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 2982 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 2983 // CHECK7-NEXT: ret void 2984 // 2985 // 2986 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33 2987 // CHECK7-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] { 2988 // CHECK7-NEXT: entry: 2989 // CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 2990 // CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4 2991 // CHECK7-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4 2992 // CHECK7-NEXT: [[Y_CASTED:%.*]] = alloca i32, align 4 2993 // CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4 2994 // CHECK7-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4 2995 // CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 2996 // CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4 2997 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4 2998 // CHECK7-NEXT: store i32 [[TMP2]], i32* [[X_CASTED]], align 4 2999 // CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[X_CASTED]], align 4 3000 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4 3001 // CHECK7-NEXT: store i32 [[TMP4]], i32* [[Y_CASTED]], align 4 3002 // CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[Y_CASTED]], align 4 3003 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP5]]) 3004 // CHECK7-NEXT: ret void 3005 // 3006 // 3007 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..1 3008 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) #[[ATTR0]] { 3009 // CHECK7-NEXT: entry: 3010 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3011 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3012 // CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4 3013 // CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca i32, align 4 3014 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3015 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3016 // CHECK7-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4 3017 // CHECK7-NEXT: store i32 [[Y]], i32* [[Y_ADDR]], align 4 3018 // CHECK7-NEXT: ret void 3019 // 3020 // 3021 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39 3022 // CHECK7-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] { 3023 // CHECK7-NEXT: entry: 3024 // CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 3025 // CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4 3026 // CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4 3027 // CHECK7-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4 3028 // CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 3029 // CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4 3030 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]]) 3031 // CHECK7-NEXT: ret void 3032 // 3033 // 3034 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..2 3035 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] { 3036 // CHECK7-NEXT: entry: 3037 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3038 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3039 // CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 3040 // CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4 3041 // CHECK7-NEXT: [[X1:%.*]] = alloca i32, align 4 3042 // CHECK7-NEXT: [[Y2:%.*]] = alloca i32, align 4 3043 // CHECK7-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4 3044 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3045 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3046 // CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4 3047 // CHECK7-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4 3048 // CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 3049 // CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4 3050 // CHECK7-NEXT: store i32 0, i32* [[X1]], align 4 3051 // CHECK7-NEXT: store i32 0, i32* [[Y2]], align 4 3052 // CHECK7-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 3053 // CHECK7-NEXT: [[TMP3:%.*]] = bitcast i32* [[X1]] to i8* 3054 // CHECK7-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 4 3055 // CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1 3056 // CHECK7-NEXT: [[TMP5:%.*]] = 
bitcast i32* [[Y2]] to i8* 3057 // CHECK7-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 4 3058 // CHECK7-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 3059 // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4 3060 // CHECK7-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 3061 // CHECK7-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i32 8, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) 3062 // CHECK7-NEXT: switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 3063 // CHECK7-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] 3064 // CHECK7-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 3065 // CHECK7-NEXT: ] 3066 // CHECK7: .omp.reduction.case1: 3067 // CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4 3068 // CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[X1]], align 4 3069 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]] 3070 // CHECK7-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4 3071 // CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4 3072 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4 3073 // CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 3074 // CHECK7-NEXT: store i32 [[ADD3]], i32* [[TMP1]], align 4 3075 // CHECK7-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var) 3076 // CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 3077 // CHECK7: .omp.reduction.case2: 3078 // CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[X1]], align 4 3079 // CHECK7-NEXT: [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4 3080 // CHECK7-NEXT: [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4 3081 // CHECK7-NEXT: [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4 3082 // CHECK7-NEXT: call void 
@__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var) 3083 // CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 3084 // CHECK7: .omp.reduction.default: 3085 // CHECK7-NEXT: ret void 3086 // 3087 // 3088 // CHECK7-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func 3089 // CHECK7-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] { 3090 // CHECK7-NEXT: entry: 3091 // CHECK7-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 3092 // CHECK7-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 3093 // CHECK7-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 3094 // CHECK7-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 3095 // CHECK7-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 3096 // CHECK7-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]* 3097 // CHECK7-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 3098 // CHECK7-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]* 3099 // CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0 3100 // CHECK7-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 3101 // CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 3102 // CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 0 3103 // CHECK7-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 3104 // CHECK7-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 3105 // CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1 3106 // CHECK7-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4 3107 // CHECK7-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32* 3108 // CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 1 3109 // CHECK7-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4 3110 // CHECK7-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32* 3111 // CHECK7-NEXT: [[TMP18:%.*]] = load i32, 
i32* [[TMP11]], align 4 3112 // CHECK7-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4 3113 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] 3114 // CHECK7-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 3115 // CHECK7-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4 3116 // CHECK7-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4 3117 // CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] 3118 // CHECK7-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4 3119 // CHECK7-NEXT: ret void 3120 // 3121 // 3122 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45 3123 // CHECK7-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] { 3124 // CHECK7-NEXT: entry: 3125 // CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 3126 // CHECK7-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4 3127 // CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4 3128 // CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 3129 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 3130 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4 3131 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4 3132 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP2]]) 3133 // CHECK7-NEXT: ret void 3134 // 3135 // 3136 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..3 3137 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR0]] { 3138 // CHECK7-NEXT: entry: 3139 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3140 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3141 // CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4 3142 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3143 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3144 // CHECK7-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4 3145 // CHECK7-NEXT: ret void 3146 // 3147 // 3148 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51 3149 // CHECK7-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] { 3150 // CHECK7-NEXT: entry: 3151 // CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 3152 // CHECK7-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4 3153 // CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4 3154 // CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 3155 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 3156 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4 3157 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4 3158 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]]) 3159 // CHECK7-NEXT: ret void 3160 // 3161 // 3162 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..4 3163 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR0]] { 3164 // CHECK7-NEXT: entry: 3165 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3166 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3167 // CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4 3168 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3169 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3170 // CHECK7-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4 3171 // CHECK7-NEXT: ret void 3172 // 3173 // 3174 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57 3175 // CHECK7-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] { 3176 // CHECK7-NEXT: entry: 3177 // CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4 3178 // CHECK7-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4 3179 // CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4 3180 // CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4 3181 // CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4 3182 // CHECK7-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4 3183 // CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4 3184 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP2]]) 3185 // CHECK7-NEXT: ret void 3186 // 3187 // 3188 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..5 3189 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR0]] { 3190 // CHECK7-NEXT: entry: 3191 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3192 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3193 // CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4 3194 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3195 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3196 // CHECK7-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4 3197 // CHECK7-NEXT: ret void 3198 // 3199 // 3200 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63 3201 // CHECK7-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] { 3202 // CHECK7-NEXT: entry: 3203 // CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4 3204 // CHECK7-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4 3205 // CHECK7-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4 3206 // CHECK7-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4 3207 // CHECK7-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4 3208 // CHECK7-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4 3209 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]]) 3210 // CHECK7-NEXT: ret void 3211 // 3212 // 3213 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..6 3214 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] { 3215 // CHECK7-NEXT: entry: 3216 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3217 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3218 // CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4 3219 // CHECK7-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4 3220 // CHECK7-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4 3221 // CHECK7-NEXT: [[X:%.*]] = alloca [77 x i32], align 4 3222 // CHECK7-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4 3223 // CHECK7-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 3224 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3225 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3226 // CHECK7-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4 3227 // CHECK7-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4 3228 // CHECK7-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4 3229 // CHECK7-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4 3230 // CHECK7-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8* 3231 // CHECK7-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8* 3232 // CHECK7-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false) 3233 // CHECK7-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 
x i32], [99 x i32]* [[Z2]], i32 0, i32 0 3234 // CHECK7-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99 3235 // CHECK7-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]] 3236 // CHECK7-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 3237 // CHECK7: omp.arrayinit.body: 3238 // CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 3239 // CHECK7-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 3240 // CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 3241 // CHECK7-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]] 3242 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 3243 // CHECK7: omp.arrayinit.done: 3244 // CHECK7-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32* 3245 // CHECK7-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32* 3246 // CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 3247 // CHECK7-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 3248 // CHECK7-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4 3249 // CHECK7-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 3250 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 3251 // CHECK7-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 3252 // CHECK7-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.7, [8 x i32]* @.gomp_critical_user_.reduction.var) 3253 // CHECK7-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 3254 // CHECK7-NEXT: i32 1, label 
[[DOTOMP_REDUCTION_CASE1:%.*]] 3255 // CHECK7-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 3256 // CHECK7-NEXT: ] 3257 // CHECK7: .omp.reduction.case1: 3258 // CHECK7-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99 3259 // CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]] 3260 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 3261 // CHECK7: omp.arraycpy.body: 3262 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 3263 // CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ] 3264 // CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 3265 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 3266 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 3267 // CHECK7-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 3268 // CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1 3269 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 3270 // CHECK7-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]] 3271 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] 3272 // CHECK7: omp.arraycpy.done6: 3273 // CHECK7-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 3274 // CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 3275 // CHECK7: .omp.reduction.case2: 3276 // CHECK7-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99 3277 // 
CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]] 3278 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]] 3279 // CHECK7: omp.arraycpy.body8: 3280 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 3281 // CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 3282 // CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4 3283 // CHECK7-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4 3284 // CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1 3285 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1 3286 // CHECK7-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]] 3287 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]] 3288 // CHECK7: omp.arraycpy.done14: 3289 // CHECK7-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 3290 // CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 3291 // CHECK7: .omp.reduction.default: 3292 // CHECK7-NEXT: ret void 3293 // 3294 // 3295 // CHECK7-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.7 3296 // CHECK7-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] { 3297 // CHECK7-NEXT: entry: 3298 // CHECK7-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 3299 // CHECK7-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 3300 // CHECK7-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 3301 // CHECK7-NEXT: 
store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 3302 // CHECK7-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 3303 // CHECK7-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 3304 // CHECK7-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 3305 // CHECK7-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 3306 // CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 3307 // CHECK7-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 3308 // CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 3309 // CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 3310 // CHECK7-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 3311 // CHECK7-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 3312 // CHECK7-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99 3313 // CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] 3314 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 3315 // CHECK7: omp.arraycpy.body: 3316 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 3317 // CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 3318 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 3319 // CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 3320 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 3321 // CHECK7-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 3322 // CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 3323 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* 
[[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 3324 // CHECK7-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] 3325 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 3326 // CHECK7: omp.arraycpy.done2: 3327 // CHECK7-NEXT: ret void 3328 // 3329 // 3330 // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65 3331 // CHECK7-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] { 3332 // CHECK7-NEXT: entry: 3333 // CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4 3334 // CHECK7-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4 3335 // CHECK7-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4 3336 // CHECK7-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4 3337 // CHECK7-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4 3338 // CHECK7-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4 3339 // CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]]) 3340 // CHECK7-NEXT: ret void 3341 // 3342 // 3343 // CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..8 3344 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] { 3345 // CHECK7-NEXT: entry: 3346 // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 3347 // CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 3348 // CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4 3349 // CHECK7-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4 3350 // CHECK7-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4 3351 // CHECK7-NEXT: [[X:%.*]] = alloca [77 x i32], align 4 3352 // CHECK7-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4 3353 // CHECK7-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 3354 // CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 3355 // CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 3356 // CHECK7-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4 3357 // CHECK7-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4 3358 // CHECK7-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4 3359 // CHECK7-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4 3360 // CHECK7-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8* 3361 // CHECK7-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8* 3362 // CHECK7-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false) 3363 // CHECK7-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 
x i32], [99 x i32]* [[Z2]], i32 0, i32 0 3364 // CHECK7-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99 3365 // CHECK7-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]] 3366 // CHECK7-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] 3367 // CHECK7: omp.arrayinit.body: 3368 // CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] 3369 // CHECK7-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 3370 // CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 3371 // CHECK7-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]] 3372 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] 3373 // CHECK7: omp.arrayinit.done: 3374 // CHECK7-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32* 3375 // CHECK7-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32* 3376 // CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 3377 // CHECK7-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* 3378 // CHECK7-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4 3379 // CHECK7-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 3380 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4 3381 // CHECK7-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* 3382 // CHECK7-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.9, [8 x i32]* @.gomp_critical_user_.reduction.var) 3383 // CHECK7-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ 3384 // CHECK7-NEXT: i32 1, label 
[[DOTOMP_REDUCTION_CASE1:%.*]] 3385 // CHECK7-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] 3386 // CHECK7-NEXT: ] 3387 // CHECK7: .omp.reduction.case1: 3388 // CHECK7-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99 3389 // CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]] 3390 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 3391 // CHECK7: omp.arraycpy.body: 3392 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 3393 // CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ] 3394 // CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 3395 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 3396 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] 3397 // CHECK7-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 3398 // CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1 3399 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 3400 // CHECK7-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]] 3401 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] 3402 // CHECK7: omp.arraycpy.done6: 3403 // CHECK7-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 3404 // CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 3405 // CHECK7: .omp.reduction.case2: 3406 // CHECK7-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99 3407 // 
CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]] 3408 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]] 3409 // CHECK7: omp.arraycpy.body8: 3410 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 3411 // CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ] 3412 // CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4 3413 // CHECK7-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4 3414 // CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1 3415 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1 3416 // CHECK7-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]] 3417 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]] 3418 // CHECK7: omp.arraycpy.done14: 3419 // CHECK7-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var) 3420 // CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] 3421 // CHECK7: .omp.reduction.default: 3422 // CHECK7-NEXT: ret void 3423 // 3424 // 3425 // CHECK7-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.9 3426 // CHECK7-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] { 3427 // CHECK7-NEXT: entry: 3428 // CHECK7-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 3429 // CHECK7-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 3430 // CHECK7-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 3431 // CHECK7-NEXT: 
store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 3432 // CHECK7-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 3433 // CHECK7-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* 3434 // CHECK7-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 3435 // CHECK7-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* 3436 // CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 3437 // CHECK7-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 3438 // CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* 3439 // CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 3440 // CHECK7-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 3441 // CHECK7-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* 3442 // CHECK7-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99 3443 // CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] 3444 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 3445 // CHECK7: omp.arraycpy.body: 3446 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 3447 // CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 3448 // CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 3449 // CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 3450 // CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] 3451 // CHECK7-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 3452 // CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 3453 // CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* 
[[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 3454 // CHECK7-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] 3455 // CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] 3456 // CHECK7: omp.arraycpy.done2: 3457 // CHECK7-NEXT: ret void 3458 // 3459