1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ 2 // Test target codegen - host bc file has to be created first. 3 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc 4 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns | FileCheck %s --check-prefix=CHECK1 5 // RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - -disable-llvm-optzns -fopenmp-cuda-parallel-target-regions | FileCheck %s --check-prefix=CHECK2 6 // expected-no-diagnostics 7 #ifndef HEADER 8 #define HEADER 9 10 template<typename tx> 11 tx ftemplate(int n) { 12 tx b[10]; 13 14 #pragma omp target 15 { 16 tx d = n; 17 #pragma omp parallel for 18 for(int i=0; i<10; i++) { 19 b[i] += d; 20 } 21 b[3] += 1; 22 } 23 24 return b[3]; 25 } 26 27 int bar(int n){ 28 int a = 0; 29 30 a += ftemplate<int>(n); 31 32 return a; 33 } 34 35 #endif 36 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14_worker 37 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] { 38 // CHECK1-NEXT: entry: 39 // CHECK1-NEXT: [[WORK_FN:%.*]] = alloca i8*, align 8 40 // CHECK1-NEXT: [[EXEC_STATUS:%.*]] = alloca i8, align 1 41 // CHECK1-NEXT: store i8* null, i8** [[WORK_FN]], align 8 42 // CHECK1-NEXT: store i8 0, i8* [[EXEC_STATUS]], align 1 43 // CHECK1-NEXT: br label [[DOTAWAIT_WORK:%.*]] 44 // CHECK1: .await.work: 45 // CHECK1-NEXT: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) 46 // CHECK1-NEXT: [[TMP0:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORK_FN]]) 47 // CHECK1-NEXT: [[TMP1:%.*]] = zext i1 [[TMP0]] to i8 48 // CHECK1-NEXT: store i8 [[TMP1]], i8* [[EXEC_STATUS]], align 1 49 // CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[WORK_FN]], align 8 50 // CHECK1-NEXT: [[SHOULD_TERMINATE:%.*]] = icmp eq i8* [[TMP2]], null 51 // CHECK1-NEXT: br i1 [[SHOULD_TERMINATE]], label [[DOTEXIT:%.*]], label [[DOTSELECT_WORKERS:%.*]] 52 // CHECK1: .select.workers: 53 // CHECK1-NEXT: [[TMP3:%.*]] = load i8, i8* [[EXEC_STATUS]], align 1 54 // CHECK1-NEXT: [[IS_ACTIVE:%.*]] = icmp ne i8 [[TMP3]], 0 55 // CHECK1-NEXT: br i1 [[IS_ACTIVE]], label [[DOTEXECUTE_PARALLEL:%.*]], label [[DOTBARRIER_PARALLEL:%.*]] 56 // CHECK1: .execute.parallel: 57 // CHECK1-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 58 // CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[WORK_FN]], align 8 59 // CHECK1-NEXT: [[WORK_MATCH:%.*]] = icmp eq i8* [[TMP5]], bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*) 60 // CHECK1-NEXT: br i1 [[WORK_MATCH]], label [[DOTEXECUTE_FN:%.*]], label [[DOTCHECK_NEXT:%.*]] 61 // CHECK1: .execute.fn: 62 // CHECK1-NEXT: call void @__omp_outlined___wrapper(i16 0, i32 [[TMP4]]) #[[ATTR3:[0-9]+]] 63 // CHECK1-NEXT: br label [[DOTTERMINATE_PARALLEL:%.*]] 64 // CHECK1: .check.next: 65 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP2]] to void (i16, i32)* 66 // CHECK1-NEXT: call void [[TMP6]](i16 0, i32 [[TMP4]]) 67 // CHECK1-NEXT: br label [[DOTTERMINATE_PARALLEL]] 68 // CHECK1: .terminate.parallel: 69 // CHECK1-NEXT: call void @__kmpc_kernel_end_parallel() 70 // CHECK1-NEXT: br label [[DOTBARRIER_PARALLEL]] 71 // CHECK1: .barrier.parallel: 72 // CHECK1-NEXT: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) 73 // CHECK1-NEXT: br label [[DOTAWAIT_WORK]] 74 // CHECK1: .exit: 75 // CHECK1-NEXT: ret void 76 // 77 // 78 // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14 79 // CHECK1-SAME: (i64 [[N:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1:[0-9]+]] { 80 // CHECK1-NEXT: entry: 81 // CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 82 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 83 // CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8 84 // CHECK1-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8 85 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 86 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* 87 // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 88 // CHECK1-NEXT: [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() 89 // CHECK1-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() 90 // CHECK1-NEXT: [[NVPTX_WARP_SIZE:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() 91 // CHECK1-NEXT: [[THREAD_LIMIT:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS]], [[NVPTX_WARP_SIZE]] 92 // CHECK1-NEXT: [[TMP1:%.*]] = icmp ult i32 [[NVPTX_TID]], [[THREAD_LIMIT]] 93 // CHECK1-NEXT: br i1 [[TMP1]], label [[DOTWORKER:%.*]], label [[DOTMASTERCHECK:%.*]] 94 // CHECK1: .worker: 95 // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14_worker() #[[ATTR3]] 96 // CHECK1-NEXT: br label [[DOTEXIT:%.*]] 97 // CHECK1: .mastercheck: 98 // CHECK1-NEXT: [[NVPTX_TID1:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() 99 // CHECK1-NEXT: [[NVPTX_NUM_THREADS2:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() 100 // CHECK1-NEXT: [[NVPTX_WARP_SIZE3:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() 101 // CHECK1-NEXT: [[TMP2:%.*]] = sub nuw i32 [[NVPTX_WARP_SIZE3]], 1 102 // CHECK1-NEXT: [[TMP3:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS2]], 1 103 // CHECK1-NEXT: [[TMP4:%.*]] = xor i32 [[TMP2]], -1 104 // CHECK1-NEXT: [[MASTER_TID:%.*]] = and i32 [[TMP3]], [[TMP4]] 105 // CHECK1-NEXT: [[TMP5:%.*]] = icmp eq i32 [[NVPTX_TID1]], [[MASTER_TID]] 106 // CHECK1-NEXT: br i1 [[TMP5]], label [[DOTMASTER:%.*]], label [[DOTEXIT]] 107 // CHECK1: .master: 108 // CHECK1-NEXT: [[NVPTX_NUM_THREADS4:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() 109 // CHECK1-NEXT: [[NVPTX_WARP_SIZE5:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() 110 // CHECK1-NEXT: [[THREAD_LIMIT6:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS4]], [[NVPTX_WARP_SIZE5]] 111 // CHECK1-NEXT: call void @__kmpc_kernel_init(i32 [[THREAD_LIMIT6]], i16 1) 112 // CHECK1-NEXT: call void @__kmpc_data_sharing_init_stack() 113 // CHECK1-NEXT: [[TMP6:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2 114 // CHECK1-NEXT: [[TMP7:%.*]] = load i64, i64* @"_openmp_static_kernel$size", align 8 115 // CHECK1-NEXT: call void @__kmpc_get_team_static_memory(i16 0, i8* addrspacecast (i8 addrspace(3)* getelementptr inbounds (%"union._shared_openmp_static_memory_type_$_", %"union._shared_openmp_static_memory_type_$_" addrspace(3)* @"_openmp_shared_static_glob_rd_$_", i32 0, i32 0, i32 0) to i8*), i64 [[TMP7]], i16 [[TMP6]], i8** addrspacecast (i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr" to i8**)) 116 // CHECK1-NEXT: [[TMP8:%.*]] = load i8*, i8* addrspace(3)* @"_openmp_kernel_static_glob_rd$ptr", align 8 117 // CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, i8* [[TMP8]], i64 0 118 // CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8* [[TMP9]] to %struct._globalized_locals_ty* 119 // CHECK1-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP10]], i32 0, i32 0 120 // CHECK1-NEXT: [[TMP11:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]]) 121 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[CONV]], align 8 122 // CHECK1-NEXT: store i32 [[TMP12]], i32* [[D]], align 4 123 // CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 124 // CHECK1-NEXT: [[TMP14:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8* 125 // CHECK1-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 8 126 // CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1 127 // CHECK1-NEXT: [[TMP16:%.*]] = bitcast i32* [[D]] to i8* 128 // CHECK1-NEXT: store i8* [[TMP16]], i8** [[TMP15]], align 8 129 // CHECK1-NEXT: [[TMP17:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** 130 // CHECK1-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP11]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, [10 x i32]*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP17]], i64 2) 131 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 3 132 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 133 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], 1 134 // CHECK1-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX]], align 4 135 // CHECK1-NEXT: [[TMP19:%.*]] = load i16, i16* @"_openmp_static_kernel$is_shared", align 2 136 // CHECK1-NEXT: call void @__kmpc_restore_team_static_memory(i16 0, i16 [[TMP19]]) 137 // CHECK1-NEXT: br label [[DOTTERMINATION_NOTIFIER:%.*]] 138 // CHECK1: .termination.notifier: 139 // CHECK1-NEXT: call void @__kmpc_kernel_deinit(i16 1) 140 // CHECK1-NEXT: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) 141 // CHECK1-NEXT: br label [[DOTEXIT]] 142 // CHECK1: .exit: 143 // CHECK1-NEXT: ret void 144 // 145 // 146 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined__ 147 // CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] { 148 // CHECK1-NEXT: entry: 149 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 150 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 151 // CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 152 // CHECK1-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8 153 // CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 154 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 155 // CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 156 // CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 157 // CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 158 // CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 159 // CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 160 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 161 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 162 // CHECK1-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 163 // CHECK1-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8 164 // CHECK1-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 165 // CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8 166 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 167 // CHECK1-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 168 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 169 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 170 // CHECK1-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 171 // CHECK1-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 172 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP3]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 173 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 174 // CHECK1: omp.dispatch.cond: 175 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 176 // CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9 177 // CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 178 // CHECK1: cond.true: 179 // CHECK1-NEXT: br label [[COND_END:%.*]] 180 // CHECK1: cond.false: 181 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 182 // CHECK1-NEXT: br label [[COND_END]] 183 // CHECK1: cond.end: 184 // CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 185 // CHECK1-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 186 // CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 187 // CHECK1-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 188 // CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 189 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 190 // CHECK1-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 191 // CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 192 // CHECK1: omp.dispatch.body: 193 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 194 // CHECK1: omp.inner.for.cond: 195 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 196 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 197 // CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] 198 // CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 199 // CHECK1: omp.inner.for.body: 200 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 201 // CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 202 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 203 // CHECK1-NEXT: store i32 [[ADD]], i32* [[I]], align 4 204 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4 205 // CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4 206 // CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 207 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]] 208 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 209 // CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP14]], [[TMP12]] 210 // CHECK1-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4 211 // CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 212 // CHECK1: omp.body.continue: 213 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 214 // CHECK1: omp.inner.for.inc: 215 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 216 // CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP15]], 1 217 // CHECK1-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4 218 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 219 // CHECK1: omp.inner.for.end: 220 // CHECK1-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 221 // CHECK1: omp.dispatch.inc: 222 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 223 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 224 // CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] 225 // CHECK1-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_LB]], align 4 226 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 227 // CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 228 // CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] 229 // CHECK1-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_UB]], align 4 230 // CHECK1-NEXT: br label [[OMP_DISPATCH_COND]] 231 // CHECK1: omp.dispatch.end: 232 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 233 // CHECK1-NEXT: ret void 234 // 235 // 236 // CHECK1-LABEL: define {{[^@]+}}@__omp_outlined___wrapper 237 // CHECK1-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] { 238 // CHECK1-NEXT: entry: 239 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2 240 // CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4 241 // CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 242 // CHECK1-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8 243 // CHECK1-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4 244 // CHECK1-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2 245 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4 246 // CHECK1-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]]) 247 // CHECK1-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8 248 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0 249 // CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to [10 x i32]** 250 // CHECK1-NEXT: [[TMP5:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP4]], align 8 251 // CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1 252 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32** 253 // CHECK1-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8 254 // CHECK1-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP5]], i32* [[TMP8]]) #[[ATTR3]] 255 // CHECK1-NEXT: ret void 256 // 257 // 258 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14_worker 259 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] { 260 // CHECK2-NEXT: entry: 261 // CHECK2-NEXT: [[WORK_FN:%.*]] = alloca i8*, align 8 262 // CHECK2-NEXT: [[EXEC_STATUS:%.*]] = alloca i8, align 1 263 // CHECK2-NEXT: store i8* null, i8** [[WORK_FN]], align 8 264 // CHECK2-NEXT: store i8 0, i8* [[EXEC_STATUS]], align 1 265 // CHECK2-NEXT: br label [[DOTAWAIT_WORK:%.*]] 266 // CHECK2: .await.work: 267 // CHECK2-NEXT: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) 268 // CHECK2-NEXT: [[TMP0:%.*]] = call i1 @__kmpc_kernel_parallel(i8** [[WORK_FN]]) 269 // CHECK2-NEXT: [[TMP1:%.*]] = zext i1 [[TMP0]] to i8 270 // CHECK2-NEXT: store i8 [[TMP1]], i8* [[EXEC_STATUS]], align 1 271 // CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[WORK_FN]], align 8 272 // CHECK2-NEXT: [[SHOULD_TERMINATE:%.*]] = icmp eq i8* [[TMP2]], null 273 // CHECK2-NEXT: br i1 [[SHOULD_TERMINATE]], label [[DOTEXIT:%.*]], label [[DOTSELECT_WORKERS:%.*]] 274 // CHECK2: .select.workers: 275 // CHECK2-NEXT: [[TMP3:%.*]] = load i8, i8* [[EXEC_STATUS]], align 1 276 // CHECK2-NEXT: [[IS_ACTIVE:%.*]] = icmp ne i8 [[TMP3]], 0 277 // CHECK2-NEXT: br i1 [[IS_ACTIVE]], label [[DOTEXECUTE_PARALLEL:%.*]], label [[DOTBARRIER_PARALLEL:%.*]] 278 // CHECK2: .execute.parallel: 279 // CHECK2-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 280 // CHECK2-NEXT: [[TMP5:%.*]] = load i8*, i8** [[WORK_FN]], align 8 281 // CHECK2-NEXT: [[WORK_MATCH:%.*]] = icmp eq i8* [[TMP5]], bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*) 282 // CHECK2-NEXT: br i1 [[WORK_MATCH]], label [[DOTEXECUTE_FN:%.*]], label [[DOTCHECK_NEXT:%.*]] 283 // CHECK2: .execute.fn: 284 // CHECK2-NEXT: call void @__omp_outlined___wrapper(i16 0, i32 [[TMP4]]) #[[ATTR3:[0-9]+]] 285 // CHECK2-NEXT: br label [[DOTTERMINATE_PARALLEL:%.*]] 286 // CHECK2: .check.next: 287 // CHECK2-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP2]] to void (i16, i32)* 288 // CHECK2-NEXT: call void [[TMP6]](i16 0, i32 [[TMP4]]) 289 // CHECK2-NEXT: br label [[DOTTERMINATE_PARALLEL]] 290 // CHECK2: .terminate.parallel: 291 // CHECK2-NEXT: call void @__kmpc_kernel_end_parallel() 292 // CHECK2-NEXT: br label [[DOTBARRIER_PARALLEL]] 293 // CHECK2: .barrier.parallel: 294 // CHECK2-NEXT: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) 295 // CHECK2-NEXT: br label [[DOTAWAIT_WORK]] 296 // CHECK2: .exit: 297 // CHECK2-NEXT: ret void 298 // 299 // 300 // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14 301 // CHECK2-SAME: (i64 [[N:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1:[0-9]+]] { 302 // CHECK2-NEXT: entry: 303 // CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8 304 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 305 // CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x i8*], align 8 306 // CHECK2-NEXT: store i64 [[N]], i64* [[N_ADDR]], align 8 307 // CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 308 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* 309 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 310 // CHECK2-NEXT: [[NVPTX_TID:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() 311 // CHECK2-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() 312 // CHECK2-NEXT: [[NVPTX_WARP_SIZE:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() 313 // CHECK2-NEXT: [[THREAD_LIMIT:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS]], [[NVPTX_WARP_SIZE]] 314 // CHECK2-NEXT: [[TMP1:%.*]] = icmp ult i32 [[NVPTX_TID]], [[THREAD_LIMIT]] 315 // CHECK2-NEXT: br i1 [[TMP1]], label [[DOTWORKER:%.*]], label [[DOTMASTERCHECK:%.*]] 316 // CHECK2: .worker: 317 // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l14_worker() #[[ATTR3]] 318 // CHECK2-NEXT: br label [[DOTEXIT:%.*]] 319 // CHECK2: .mastercheck: 320 // CHECK2-NEXT: [[NVPTX_TID1:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x() 321 // CHECK2-NEXT: [[NVPTX_NUM_THREADS2:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() 322 // CHECK2-NEXT: [[NVPTX_WARP_SIZE3:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() 323 // CHECK2-NEXT: [[TMP2:%.*]] = sub nuw i32 [[NVPTX_WARP_SIZE3]], 1 324 // CHECK2-NEXT: [[TMP3:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS2]], 1 325 // CHECK2-NEXT: [[TMP4:%.*]] = xor i32 [[TMP2]], -1 326 // CHECK2-NEXT: [[MASTER_TID:%.*]] = and i32 [[TMP3]], [[TMP4]] 327 // CHECK2-NEXT: [[TMP5:%.*]] = icmp eq i32 [[NVPTX_TID1]], [[MASTER_TID]] 328 // CHECK2-NEXT: br i1 [[TMP5]], label [[DOTMASTER:%.*]], label [[DOTEXIT]] 329 // CHECK2: .master: 330 // CHECK2-NEXT: [[NVPTX_NUM_THREADS4:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x() 331 // CHECK2-NEXT: [[NVPTX_WARP_SIZE5:%.*]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() 332 // CHECK2-NEXT: [[THREAD_LIMIT6:%.*]] = sub nuw i32 [[NVPTX_NUM_THREADS4]], [[NVPTX_WARP_SIZE5]] 333 // CHECK2-NEXT: call void @__kmpc_kernel_init(i32 [[THREAD_LIMIT6]], i16 1) 334 // CHECK2-NEXT: call void @__kmpc_data_sharing_init_stack() 335 // CHECK2-NEXT: [[TMP6:%.*]] = call i8* @__kmpc_data_sharing_push_stack(i64 4, i16 1) 336 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to %struct._globalized_locals_ty* 337 // CHECK2-NEXT: [[D:%.*]] = getelementptr inbounds [[STRUCT__GLOBALIZED_LOCALS_TY:%.*]], %struct._globalized_locals_ty* [[TMP7]], i32 0, i32 0 338 // CHECK2-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]]) 339 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[CONV]], align 8 340 // CHECK2-NEXT: store i32 [[TMP9]], i32* [[D]], align 4 341 // CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 342 // CHECK2-NEXT: [[TMP11:%.*]] = bitcast [10 x i32]* [[TMP0]] to i8* 343 // CHECK2-NEXT: store i8* [[TMP11]], i8** [[TMP10]], align 8 344 // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 1 345 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast i32* [[D]] to i8* 346 // CHECK2-NEXT: store i8* [[TMP13]], i8** [[TMP12]], align 8 347 // CHECK2-NEXT: [[TMP14:%.*]] = bitcast [2 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** 348 // CHECK2-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, [10 x i32]*, i32*)* @__omp_outlined__ to i8*), i8* bitcast (void (i16, i32)* @__omp_outlined___wrapper to i8*), i8** [[TMP14]], i64 2) 349 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 3 350 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 351 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP15]], 1 352 // CHECK2-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX]], align 4 353 // CHECK2-NEXT: call void @__kmpc_data_sharing_pop_stack(i8* [[TMP6]]) 354 // CHECK2-NEXT: br label [[DOTTERMINATION_NOTIFIER:%.*]] 355 // CHECK2: .termination.notifier: 356 // CHECK2-NEXT: call void @__kmpc_kernel_deinit(i16 1) 357 // CHECK2-NEXT: call void @__kmpc_barrier_simple_spmd(%struct.ident_t* null, i32 0) 358 // CHECK2-NEXT: br label [[DOTEXIT]] 359 // CHECK2: .exit: 360 // CHECK2-NEXT: ret void 361 // 362 // 363 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined__ 364 // CHECK2-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], [10 x i32]* nonnull align 4 dereferenceable(40) [[B:%.*]], i32* nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] { 365 // CHECK2-NEXT: entry: 366 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 367 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 368 // CHECK2-NEXT: [[B_ADDR:%.*]] = alloca [10 x i32]*, align 8 369 // CHECK2-NEXT: [[D_ADDR:%.*]] = alloca i32*, align 8 370 // CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 371 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 372 // CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 373 // CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 374 // CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 375 // CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 376 // CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 377 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 378 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 379 // CHECK2-NEXT: store [10 x i32]* [[B]], [10 x i32]** [[B_ADDR]], align 8 380 // CHECK2-NEXT: store i32* [[D]], i32** [[D_ADDR]], align 8 381 // CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[B_ADDR]], align 8 382 // CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[D_ADDR]], align 8 383 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 384 // CHECK2-NEXT: store i32 9, i32* [[DOTOMP_UB]], align 4 385 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 386 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 387 // CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 388 // CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4 389 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP3]], i32 33, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) 390 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND:%.*]] 391 // CHECK2: omp.dispatch.cond: 392 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 393 // CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 9 394 // CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] 395 // CHECK2: cond.true: 396 // CHECK2-NEXT: br label [[COND_END:%.*]] 397 // CHECK2: cond.false: 398 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 399 // CHECK2-NEXT: br label [[COND_END]] 400 // CHECK2: cond.end: 401 // CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ] 402 // CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 403 // CHECK2-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 404 // CHECK2-NEXT: store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4 405 // CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 406 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 407 // CHECK2-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]] 408 // CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]] 409 // CHECK2: omp.dispatch.body: 410 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 411 // CHECK2: omp.inner.for.cond: 412 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 413 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 414 // CHECK2-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] 415 // CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 416 // CHECK2: omp.inner.for.body: 417 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 418 // CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1 419 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] 420 // CHECK2-NEXT: store i32 [[ADD]], i32* [[I]], align 4 421 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4 422 // CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4 423 // CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64 424 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i64 0, i64 [[IDXPROM]] 425 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 426 // CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP14]], [[TMP12]] 427 // CHECK2-NEXT: store i32 [[ADD3]], i32* [[ARRAYIDX]], align 4 428 // CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] 429 // CHECK2: omp.body.continue: 430 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 431 // CHECK2: omp.inner.for.inc: 432 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 433 // CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP15]], 1 434 // CHECK2-NEXT: store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4 435 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 436 // CHECK2: omp.inner.for.end: 437 // CHECK2-NEXT: br label [[OMP_DISPATCH_INC:%.*]] 438 // CHECK2: omp.dispatch.inc: 439 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 440 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 441 // CHECK2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP17]] 442 // CHECK2-NEXT: store i32 [[ADD5]], i32* [[DOTOMP_LB]], align 4 443 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 444 // CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 445 // CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[TMP19]] 446 // CHECK2-NEXT: store i32 [[ADD6]], i32* [[DOTOMP_UB]], align 4 447 // CHECK2-NEXT: br label [[OMP_DISPATCH_COND]] 448 // CHECK2: omp.dispatch.end: 449 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP3]]) 450 // CHECK2-NEXT: ret void 451 // 452 // 453 // CHECK2-LABEL: define {{[^@]+}}@__omp_outlined___wrapper 454 // CHECK2-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR0]] { 455 // CHECK2-NEXT: entry: 456 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2 457 // CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4 458 // CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4 459 // CHECK2-NEXT: [[GLOBAL_ARGS:%.*]] = alloca i8**, align 8 460 // CHECK2-NEXT: store i32 0, i32* [[DOTZERO_ADDR]], align 4 461 // CHECK2-NEXT: store i16 [[TMP0]], i16* [[DOTADDR]], align 2 462 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4 463 // CHECK2-NEXT: call void @__kmpc_get_shared_variables(i8*** [[GLOBAL_ARGS]]) 464 // CHECK2-NEXT: [[TMP2:%.*]] = load i8**, i8*** [[GLOBAL_ARGS]], align 8 465 // CHECK2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 0 466 // CHECK2-NEXT: [[TMP4:%.*]] = bitcast i8** [[TMP3]] to [10 x i32]** 467 // CHECK2-NEXT: [[TMP5:%.*]] = load [10 x i32]*, [10 x i32]** [[TMP4]], align 8 468 // CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8*, i8** [[TMP2]], i64 1 469 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast i8** [[TMP6]] to i32** 470 // CHECK2-NEXT: [[TMP8:%.*]] = load i32*, i32** [[TMP7]], align 8 471 // CHECK2-NEXT: call void @__omp_outlined__(i32* [[DOTADDR1]], i32* [[DOTZERO_ADDR]], [10 x i32]* [[TMP5]], i32* [[TMP8]]) #[[ATTR3]] 472 // CHECK2-NEXT: ret void 473 // 474