; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-ALLOCA %s
; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=tonga -mattr=+promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-PROMOTE %s
; RUN: opt -S -mtriple=amdgcn-- -amdgpu-promote-alloca -sroa -instcombine < %s | FileCheck -check-prefix=OPT %s

; Checks that allocas accessed through bitcasts are promoted to vectors
; (OPT: no alloca remains; GCN-PROMOTE: ScratchSize: 0), while with
; promote-alloca disabled they lower to scratch buffer accesses (GCN-ALLOCA).

target datalayout = "A5"

; OPT-LABEL: @vector_read_alloca_bitcast(
; OPT-NOT: alloca
; OPT: %0 = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
; OPT-NEXT: store i32 %0, i32 addrspace(1)* %out, align 4

; GCN-LABEL: {{^}}vector_read_alloca_bitcast:
; GCN-ALLOCA-COUNT-4: buffer_store_dword
; GCN-ALLOCA: buffer_load_dword

; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 2
; GCN-PROMOTE: s_cmp_eq_u32 s{{[0-9]+}}, 1
; GCN-PROMOTE: s_cselect_b64 [[CC1:[^,]+]], -1, 0
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 3
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], vcc
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], vcc
; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_read_alloca_bitcast(i32 addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %x = bitcast [4 x i32] addrspace(5)* %tmp to i32 addrspace(5)*
  %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
  store i32 0, i32 addrspace(5)* %x
  store i32 1, i32 addrspace(5)* %y
  store i32 2, i32 addrspace(5)* %z
  store i32 3, i32 addrspace(5)* %w
  %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %index
  %tmp2 = load i32, i32 addrspace(5)* %tmp1
  store i32 %tmp2, i32 addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_write_alloca_bitcast(
; OPT-NOT: alloca
; OPT: %0 = insertelement <4 x i32> zeroinitializer, i32 1, i32 %w_index
; OPT-NEXT: %1 = extractelement <4 x i32> %0, i32 %r_index
; OPT-NEXT: store i32 %1, i32 addrspace(1)* %out, align

; GCN-LABEL: {{^}}vector_write_alloca_bitcast:
; GCN-ALLOCA-COUNT-5: buffer_store_dword
; GCN-ALLOCA: buffer_load_dword

; GCN-PROMOTE-COUNT-7: v_cndmask

; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_write_alloca_bitcast(i32 addrspace(1)* %out, i32 %w_index, i32 %r_index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %x = bitcast [4 x i32] addrspace(5)* %tmp to i32 addrspace(5)*
  %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
  store i32 0, i32 addrspace(5)* %x
  store i32 0, i32 addrspace(5)* %y
  store i32 0, i32 addrspace(5)* %z
  store i32 0, i32 addrspace(5)* %w
  %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %w_index
  store i32 1, i32 addrspace(5)* %tmp1
  %tmp2 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %r_index
  %tmp3 = load i32, i32 addrspace(5)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_write_read_bitcast_to_float(
; OPT-NOT: alloca
; OPT: bb2:
; OPT: %tmp.sroa.0.0 = phi <6 x float> [ undef, %bb ], [ %0, %bb2 ]
; OPT: %0 = insertelement <6 x float> %tmp.sroa.0.0, float %tmp73, i32 %tmp10
; OPT: .preheader:
; OPT: %bc = bitcast <6 x float> %0 to <6 x i32>
; OPT: %1 = extractelement <6 x i32> %bc, i32 %tmp20

; GCN-LABEL: {{^}}vector_write_read_bitcast_to_float:
; GCN-ALLOCA: buffer_store_dword

; GCN-PROMOTE-COUNT-6: v_cmp_eq_u16
; GCN-PROMOTE-COUNT-6: v_cndmask

; GCN: s_cbranch

; GCN-ALLOCA: buffer_load_dword

; GCN-PROMOTE: v_cmp_eq_u16
; GCN-PROMOTE: v_cndmask
; GCN-PROMOTE: v_cmp_eq_u16
; GCN-PROMOTE: v_cndmask
; GCN-PROMOTE: v_cmp_eq_u16
; GCN-PROMOTE: v_cndmask
; GCN-PROMOTE: v_cmp_eq_u16
; GCN-PROMOTE: v_cndmask
; GCN-PROMOTE: v_cmp_eq_u16
; GCN-PROMOTE: v_cndmask

; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_write_read_bitcast_to_float(float addrspace(1)* %arg) {
bb:
  %tmp = alloca [6 x float], align 4, addrspace(5)
  %tmp1 = bitcast [6 x float] addrspace(5)* %tmp to i8 addrspace(5)*
  call void @llvm.lifetime.start.p5i8(i64 24, i8 addrspace(5)* %tmp1) #2
  br label %bb2

bb2:                                              ; preds = %bb2, %bb
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp13, %bb2 ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds float, float addrspace(1)* %arg, i64 %tmp4
  %tmp6 = bitcast float addrspace(1)* %tmp5 to i32 addrspace(1)*
  %tmp7 = load i32, i32 addrspace(1)* %tmp6, align 4
  %tmp8 = trunc i32 %tmp3 to i16
  %tmp9 = urem i16 %tmp8, 6
  %tmp10 = zext i16 %tmp9 to i32
  %tmp11 = getelementptr inbounds [6 x float], [6 x float] addrspace(5)* %tmp, i32 0, i32 %tmp10
  %tmp12 = bitcast float addrspace(5)* %tmp11 to i32 addrspace(5)*
  store i32 %tmp7, i32 addrspace(5)* %tmp12, align 4
  %tmp13 = add nuw nsw i32 %tmp3, 1
  %tmp14 = icmp eq i32 %tmp13, 1000
  br i1 %tmp14, label %.preheader, label %bb2

bb15:                                             ; preds = %.preheader
  call void @llvm.lifetime.end.p5i8(i64 24, i8 addrspace(5)* %tmp1) #2
  ret void

.preheader:                                       ; preds = %.preheader, %bb2
  %tmp16 = phi i32 [ %tmp27, %.preheader ], [ 0, %bb2 ]
  %tmp17 = trunc i32 %tmp16 to i16
  %tmp18 = urem i16 %tmp17, 6
  %tmp19 = sub nuw nsw i16 5, %tmp18
  %tmp20 = zext i16 %tmp19 to i32
  %tmp21 = getelementptr inbounds [6 x float], [6 x float] addrspace(5)* %tmp, i32 0, i32 %tmp20
  %tmp22 = bitcast float addrspace(5)* %tmp21 to i32 addrspace(5)*
  %tmp23 = load i32, i32 addrspace(5)* %tmp22, align 4
  %tmp24 = zext i32 %tmp16 to i64
  %tmp25 = getelementptr inbounds float, float addrspace(1)* %arg, i64 %tmp24
  %tmp26 = bitcast float addrspace(1)* %tmp25 to i32 addrspace(1)*
  store i32 %tmp23, i32 addrspace(1)* %tmp26, align 4
  %tmp27 = add nuw nsw i32 %tmp16, 1
  %tmp28 = icmp eq i32 %tmp27, 1000
  br i1 %tmp28, label %bb15, label %.preheader
}

; OPT-LABEL: @vector_write_read_bitcast_to_double(
; OPT-NOT: alloca
; OPT: bb2:
; OPT: %tmp.sroa.0.0 = phi <6 x double> [ undef, %bb ], [ %0, %bb2 ]
; OPT: %0 = insertelement <6 x double> %tmp.sroa.0.0, double %tmp73, i32 %tmp10
; OPT: .preheader:
; OPT: %bc = bitcast <6 x double> %0 to <6 x i64>
; OPT: %1 = extractelement <6 x i64> %bc, i32 %tmp20

; GCN-LABEL: {{^}}vector_write_read_bitcast_to_double:

; GCN-ALLOCA-COUNT-2: buffer_store_dword
; GCN-PROMOTE-COUNT-2: v_movreld_b32_e32

; GCN: s_cbranch

; GCN-ALLOCA-COUNT-2: buffer_load_dword
; GCN-PROMOTE-COUNT-2: v_movrels_b32_e32

; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_write_read_bitcast_to_double(double addrspace(1)* %arg) {
bb:
  %tmp = alloca [6 x double], align 8, addrspace(5)
  %tmp1 = bitcast [6 x double] addrspace(5)* %tmp to i8 addrspace(5)*
  call void @llvm.lifetime.start.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
  br label %bb2

bb2:                                              ; preds = %bb2, %bb
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp13, %bb2 ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds double, double addrspace(1)* %arg, i64 %tmp4
  %tmp6 = bitcast double addrspace(1)* %tmp5 to i64 addrspace(1)*
  %tmp7 = load i64, i64 addrspace(1)* %tmp6, align 8
  %tmp8 = trunc i32 %tmp3 to i16
  %tmp9 = urem i16 %tmp8, 6
  %tmp10 = zext i16 %tmp9 to i32
  %tmp11 = getelementptr inbounds [6 x double], [6 x double] addrspace(5)* %tmp, i32 0, i32 %tmp10
  %tmp12 = bitcast double addrspace(5)* %tmp11 to i64 addrspace(5)*
  store i64 %tmp7, i64 addrspace(5)* %tmp12, align 8
  %tmp13 = add nuw nsw i32 %tmp3, 1
  %tmp14 = icmp eq i32 %tmp13, 1000
  br i1 %tmp14, label %.preheader, label %bb2

bb15:                                             ; preds = %.preheader
  call void @llvm.lifetime.end.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
  ret void

.preheader:                                       ; preds = %.preheader, %bb2
  %tmp16 = phi i32 [ %tmp27, %.preheader ], [ 0, %bb2 ]
  %tmp17 = trunc i32 %tmp16 to i16
  %tmp18 = urem i16 %tmp17, 6
  %tmp19 = sub nuw nsw i16 5, %tmp18
  %tmp20 = zext i16 %tmp19 to i32
  %tmp21 = getelementptr inbounds [6 x double], [6 x double] addrspace(5)* %tmp, i32 0, i32 %tmp20
  %tmp22 = bitcast double addrspace(5)* %tmp21 to i64 addrspace(5)*
  %tmp23 = load i64, i64 addrspace(5)* %tmp22, align 8
  %tmp24 = zext i32 %tmp16 to i64
  %tmp25 = getelementptr inbounds double, double addrspace(1)* %arg, i64 %tmp24
  %tmp26 = bitcast double addrspace(1)* %tmp25 to i64 addrspace(1)*
  store i64 %tmp23, i64 addrspace(1)* %tmp26, align 8
  %tmp27 = add nuw nsw i32 %tmp16, 1
  %tmp28 = icmp eq i32 %tmp27, 1000
  br i1 %tmp28, label %bb15, label %.preheader
}

; OPT-LABEL: @vector_write_read_bitcast_to_i64(
; OPT-NOT: alloca
; OPT: bb2:
; OPT: %tmp.sroa.0.0 = phi <6 x i64> [ undef, %bb ], [ %0, %bb2 ]
; OPT: %0 = insertelement <6 x i64> %tmp.sroa.0.0, i64 %tmp6, i32 %tmp9
; OPT: .preheader:
; OPT: %1 = extractelement <6 x i64> %0, i32 %tmp18

; GCN-LABEL: {{^}}vector_write_read_bitcast_to_i64:

; GCN-ALLOCA-COUNT-2: buffer_store_dword
; GCN-PROMOTE-COUNT-2: v_movreld_b32_e32

; GCN: s_cbranch

; GCN-ALLOCA-COUNT-2: buffer_load_dword
; GCN-PROMOTE-COUNT-2: v_movrels_b32_e32

; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_write_read_bitcast_to_i64(i64 addrspace(1)* %arg) {
bb:
  %tmp = alloca [6 x i64], align 8, addrspace(5)
  %tmp1 = bitcast [6 x i64] addrspace(5)* %tmp to i8 addrspace(5)*
  call void @llvm.lifetime.start.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
  br label %bb2

bb2:                                              ; preds = %bb2, %bb
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp11, %bb2 ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp4
  %tmp6 = load i64, i64 addrspace(1)* %tmp5, align 8
  %tmp7 = trunc i32 %tmp3 to i16
  %tmp8 = urem i16 %tmp7, 6
  %tmp9 = zext i16 %tmp8 to i32
  %tmp10 = getelementptr inbounds [6 x i64], [6 x i64] addrspace(5)* %tmp, i32 0, i32 %tmp9
  store i64 %tmp6, i64 addrspace(5)* %tmp10, align 8
  %tmp11 = add nuw nsw i32 %tmp3, 1
  %tmp12 = icmp eq i32 %tmp11, 1000
  br i1 %tmp12, label %.preheader, label %bb2

bb13:                                             ; preds = %.preheader
  call void @llvm.lifetime.end.p5i8(i64 48, i8 addrspace(5)* %tmp1) #2
  ret void

.preheader:                                       ; preds = %.preheader, %bb2
  %tmp14 = phi i32 [ %tmp23, %.preheader ], [ 0, %bb2 ]
  %tmp15 = trunc i32 %tmp14 to i16
  %tmp16 = urem i16 %tmp15, 6
  %tmp17 = sub nuw nsw i16 5, %tmp16
  %tmp18 = zext i16 %tmp17 to i32
  %tmp19 = getelementptr inbounds [6 x i64], [6 x i64] addrspace(5)* %tmp, i32 0, i32 %tmp18
  %tmp20 = load i64, i64 addrspace(5)* %tmp19, align 8
  %tmp21 = zext i32 %tmp14 to i64
  %tmp22 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp21
  store i64 %tmp20, i64 addrspace(1)* %tmp22, align 8
  %tmp23 = add nuw nsw i32 %tmp14, 1
  %tmp24 = icmp eq i32 %tmp23, 1000
  br i1 %tmp24, label %bb13, label %.preheader
}

; TODO: llvm.assume can be ignored

; OPT-LABEL: @vector_read_alloca_bitcast_assume(
; OPT: %tmp = alloca <4 x i32>, align 16, addrspace(5)
; OPT-NEXT: %x = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %tmp, i64 0, i64 0
; OPT-NEXT: store i32 0, i32 addrspace(5)* %x, align 16
; OPT-NEXT: %0 = load <4 x i32>, <4 x i32> addrspace(5)* %tmp, align 16
; OPT-NEXT: %1 = shufflevector <4 x i32> %0, <4 x i32> <i32 poison, i32 1, i32 2, i32 3>, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
; OPT-NEXT: store <4 x i32> %1, <4 x i32> addrspace(5)* %tmp, align 16
; OPT-NEXT: %2 = extractelement <4 x i32> %1, i32 %index
; OPT-NEXT: store i32 %2, i32 addrspace(1)* %out, align 4

; GCN-LABEL: {{^}}vector_read_alloca_bitcast_assume:
; GCN-COUNT-4: buffer_store_dword

define amdgpu_kernel void @vector_read_alloca_bitcast_assume(i32 addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %x = bitcast [4 x i32] addrspace(5)* %tmp to i32 addrspace(5)*
  %cmp = icmp ne i32 addrspace(5)* %x, null
  call void @llvm.assume(i1 %cmp)
  %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
  store i32 0, i32 addrspace(5)* %x
  store i32 1, i32 addrspace(5)* %y
  store i32 2, i32 addrspace(5)* %z
  store i32 3, i32 addrspace(5)* %w
  %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %index
  %tmp2 = load i32, i32 addrspace(5)* %tmp1
  store i32 %tmp2, i32 addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_read_alloca_multiuse(
; OPT-NOT: alloca
; OPT: %0 = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
; OPT-NEXT: %add2 = add nuw nsw i32 %0, 1
; OPT-NEXT: store i32 %add2, i32 addrspace(1)* %out, align 4

; GCN-LABEL: {{^}}vector_read_alloca_multiuse:
; GCN-ALLOCA-COUNT-4: buffer_store_dword
; GCN-ALLOCA: buffer_load_dword

; GCN-PROMOTE: s_cmp_eq_u32 s{{[0-9]+}}, 1
; GCN-PROMOTE: s_cselect_b64 [[CC1:[^,]+]], -1, 0
; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 2
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND1:v[0-9]+]], 0, 1, [[CC1]]
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: s_cmp_lg_u32 s{{[0-9]+}}, 3
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND2:v[0-9]+]], 2, [[IND1]], vcc
; GCN-PROMOTE: s_cselect_b64 vcc, -1, 0
; GCN-PROMOTE: v_cndmask_b32_e{{32|64}} [[IND3:v[0-9]+]], 3, [[IND2]], vcc

; GCN-PROMOTE: ScratchSize: 0

define amdgpu_kernel void @vector_read_alloca_multiuse(i32 addrspace(1)* %out, i32 %index) {
entry:
  %tmp = alloca [4 x i32], addrspace(5)
  %b = bitcast [4 x i32] addrspace(5)* %tmp to float addrspace(5)*
  %x = bitcast float addrspace(5)* %b to i32 addrspace(5)*
  %y = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 1
  %z = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 2
  %w = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 3
  store i32 0, i32 addrspace(5)* %x
  store i32 1, i32 addrspace(5)* %y
  store i32 2, i32 addrspace(5)* %z
  store i32 3, i32 addrspace(5)* %w
  %tmp1 = getelementptr [4 x i32], [4 x i32] addrspace(5)* %tmp, i32 0, i32 %index
  %tmp2 = load i32, i32 addrspace(5)* %tmp1
  %tmp3 = load i32, i32 addrspace(5)* %x
  %tmp4 = load i32, i32 addrspace(5)* %y
  %add1 = add i32 %tmp2, %tmp3
  %add2 = add i32 %add1, %tmp4
  store i32 %add2, i32 addrspace(1)* %out
  ret void
}

; OPT-LABEL: @bitcast_vector_to_vector(
; OPT-NOT: alloca
; OPT: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(1)* %out, align 16

; GCN-LABEL: {{^}}bitcast_vector_to_vector:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0

define amdgpu_kernel void @bitcast_vector_to_vector(<4 x i32> addrspace(1)* %out) {
.entry:
  %alloca = alloca <4 x float>, align 16, addrspace(5)
  %cast = bitcast <4 x float> addrspace(5)* %alloca to <4 x i32> addrspace(5)*
  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %cast
  %load = load <4 x i32>, <4 x i32> addrspace(5)* %cast, align 16
  store <4 x i32> %load, <4 x i32> addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_bitcast_from_alloca_array(
; OPT-NOT: alloca
; OPT: store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(1)* %out, align 16

; GCN-LABEL: {{^}}vector_bitcast_from_alloca_array:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0

define amdgpu_kernel void @vector_bitcast_from_alloca_array(<4 x i32> addrspace(1)* %out) {
.entry:
  %alloca = alloca [4 x float], align 16, addrspace(5)
  %cast = bitcast [4 x float] addrspace(5)* %alloca to <4 x i32> addrspace(5)*
  store <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %cast
  %load = load <4 x i32>, <4 x i32> addrspace(5)* %cast, align 16
  store <4 x i32> %load, <4 x i32> addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_bitcast_to_array_from_alloca_array(
; OPT-NOT: alloca
; OPT: %out.repack = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 0
; OPT-NEXT: store i32 1, i32 addrspace(1)* %out.repack, align 4
; OPT-NEXT: %out.repack1 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 1
; OPT-NEXT: store i32 2, i32 addrspace(1)* %out.repack1, align 4
; OPT-NEXT: %out.repack2 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 2
; OPT-NEXT: store i32 3, i32 addrspace(1)* %out.repack2, align 4
; OPT-NEXT: %out.repack3 = getelementptr inbounds [4 x i32], [4 x i32] addrspace(1)* %out, i64 0, i64 3
; OPT-NEXT: store i32 4, i32 addrspace(1)* %out.repack3, align 4

; GCN-LABEL: {{^}}vector_bitcast_to_array_from_alloca_array:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0

define amdgpu_kernel void @vector_bitcast_to_array_from_alloca_array([4 x i32] addrspace(1)* %out) {
.entry:
  %alloca = alloca [4 x float], align 16, addrspace(5)
  %cast = bitcast [4 x float] addrspace(5)* %alloca to [4 x i32] addrspace(5)*
  store [4 x i32] [i32 1, i32 2, i32 3, i32 4], [4 x i32] addrspace(5)* %cast
  %load = load [4 x i32], [4 x i32] addrspace(5)* %cast, align 16
  store [4 x i32] %load, [4 x i32] addrspace(1)* %out
  ret void
}

; OPT-LABEL: @vector_bitcast_to_struct_from_alloca_array(
; OPT-NOT: alloca
; OPT: %out.repack = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 0
; OPT-NEXT: store i32 1, i32 addrspace(1)* %out.repack, align 4
; OPT-NEXT: %out.repack1 = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 1
; OPT-NEXT: store i32 2, i32 addrspace(1)* %out.repack1, align 4
; OPT-NEXT: %out.repack2 = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 2
; OPT-NEXT: store i32 3, i32 addrspace(1)* %out.repack2, align 4
; OPT-NEXT: %out.repack3 = getelementptr inbounds %struct.v4, %struct.v4 addrspace(1)* %out, i64 0, i32 3
; OPT-NEXT: store i32 4, i32 addrspace(1)* %out.repack3, align 4

; GCN-LABEL: {{^}}vector_bitcast_to_struct_from_alloca_array:
; GCN: v_mov_b32_e32 v0, 1
; GCN: v_mov_b32_e32 v1, 2
; GCN: v_mov_b32_e32 v2, 3
; GCN: v_mov_b32_e32 v3, 4

; GCN: ScratchSize: 0

%struct.v4 = type { i32, i32, i32, i32 }

define amdgpu_kernel void @vector_bitcast_to_struct_from_alloca_array(%struct.v4 addrspace(1)* %out) {
.entry:
  %alloca = alloca [4 x float], align 16, addrspace(5)
  %cast = bitcast [4 x float] addrspace(5)* %alloca to %struct.v4 addrspace(5)*
  store %struct.v4 { i32 1, i32 2, i32 3, i32 4 }, %struct.v4 addrspace(5)* %cast
  %load = load %struct.v4, %struct.v4 addrspace(5)* %cast, align 16
  store %struct.v4 %load, %struct.v4 addrspace(1)* %out
  ret void
}

declare void @llvm.lifetime.start.p5i8(i64 immarg, i8 addrspace(5)* nocapture)

declare void @llvm.lifetime.end.p5i8(i64 immarg, i8 addrspace(5)* nocapture)

declare void @llvm.assume(i1)