; Tests for -amdgpu-scalarize-global-loads: uniform global loads should be
; selected as scalar (s_load_*) SMEM instructions, while divergent or
; alias-clobbered loads must stay on the vector (flat_load_*) path.
; RUN: llc -O2 -mtriple amdgcn--amdhsa -mcpu=fiji -amdgpu-scalarize-global-loads=true -verify-machineinstrs < %s | FileCheck %s

; uniform loads
; CHECK-LABEL: @uniform_load
; CHECK: s_load_dwordx4
; CHECK-NOT: flat_load_dword

define amdgpu_kernel void @uniform_load(float addrspace(1)* %arg, float addrspace(1)* %arg1) {
bb:
  %tmp2 = load float, float addrspace(1)* %arg, align 4, !tbaa !8
  %tmp3 = fadd float %tmp2, 0.000000e+00
  %tmp4 = getelementptr inbounds float, float addrspace(1)* %arg, i64 1
  %tmp5 = load float, float addrspace(1)* %tmp4, align 4, !tbaa !8
  %tmp6 = fadd float %tmp3, %tmp5
  %tmp7 = getelementptr inbounds float, float addrspace(1)* %arg, i64 2
  %tmp8 = load float, float addrspace(1)* %tmp7, align 4, !tbaa !8
  %tmp9 = fadd float %tmp6, %tmp8
  %tmp10 = getelementptr inbounds float, float addrspace(1)* %arg, i64 3
  %tmp11 = load float, float addrspace(1)* %tmp10, align 4, !tbaa !8
  %tmp12 = fadd float %tmp9, %tmp11
  %tmp13 = getelementptr inbounds float, float addrspace(1)* %arg1
  store float %tmp12, float addrspace(1)* %tmp13, align 4, !tbaa !8
  ret void
}

; non-uniform loads
; CHECK-LABEL: @non-uniform_load
; CHECK: flat_load_dword
; CHECK-NOT: s_load_dwordx4

define amdgpu_kernel void @non-uniform_load(float addrspace(1)* %arg, float addrspace(1)* %arg1) #0 {
bb:
  %tmp = call i32 @llvm.amdgcn.workitem.id.x() #1
  %tmp2 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp
  %tmp3 = load float, float addrspace(1)* %tmp2, align 4, !tbaa !8
  %tmp4 = fadd float %tmp3, 0.000000e+00
  %tmp5 = add i32 %tmp, 1
  %tmp6 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp5
  %tmp7 = load float, float addrspace(1)* %tmp6, align 4, !tbaa !8
  %tmp8 = fadd float %tmp4, %tmp7
  %tmp9 = add i32 %tmp, 2
  %tmp10 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp9
  %tmp11 = load float, float addrspace(1)* %tmp10, align 4, !tbaa !8
  %tmp12 = fadd float %tmp8, %tmp11
  %tmp13 = add i32 %tmp, 3
  %tmp14 = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tmp13
  %tmp15 = load float, float addrspace(1)* %tmp14, align 4, !tbaa !8
  %tmp16 = fadd float %tmp12, %tmp15
  %tmp17 = getelementptr inbounds float, float addrspace(1)* %arg1, i32 %tmp
  store float %tmp16, float addrspace(1)* %tmp17, align 4, !tbaa !8
  ret void
}


; uniform load dominated by no-alias store - scalarize
; CHECK-LABEL: @no_memdep_alias_arg
; CHECK: flat_store_dword
; CHECK: s_load_dword [[SVAL:s[0-9]+]]
; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]

define amdgpu_kernel void @no_memdep_alias_arg(i32 addrspace(1)* noalias %in, i32 addrspace(1)* %out0, i32 addrspace(1)* %out1) {
  store i32 0, i32 addrspace(1)* %out0
  %val = load i32, i32 addrspace(1)* %in
  store i32 %val, i32 addrspace(1)* %out1
  ret void
}

; uniform load dominated by alias store - vector
; CHECK-LABEL: {{^}}memdep:
; CHECK: flat_store_dword
; CHECK: flat_load_dword [[VVAL:v[0-9]+]]
; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
define amdgpu_kernel void @memdep(i32 addrspace(1)* %in, i32 addrspace(1)* %out0, i32 addrspace(1)* %out1) {
  store i32 0, i32 addrspace(1)* %out0
  %val = load i32, i32 addrspace(1)* %in
  store i32 %val, i32 addrspace(1)* %out1
  ret void
}

; uniform load from global array
; CHECK-LABEL: @global_array
; CHECK: s_load_dwordx2 [[A_ADDR:s\[[0-9]+:[0-9]+\]]]
; CHECK: s_load_dwordx2 [[A_ADDR1:s\[[0-9]+:[0-9]+\]]], [[A_ADDR]], 0x0
; CHECK: s_load_dword [[SVAL:s[0-9]+]], [[A_ADDR1]], 0x0
; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]+]], [[SVAL]]
; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]

@A = common local_unnamed_addr addrspace(1) global i32 addrspace(1)* null, align 4

define amdgpu_kernel void @global_array(i32 addrspace(1)* nocapture %out) {
entry:
  %0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @A, align 4
  %1 = load i32, i32 addrspace(1)* %0, align 4
  store i32 %1, i32 addrspace(1)* %out, align 4
  ret void
}


; uniform load from global array dominated by alias store
; CHECK-LABEL: @global_array_alias_store
; CHECK: flat_store_dword
; CHECK: v_mov_b32_e32 v[[ADDR_LO:[0-9]+]], s{{[0-9]+}}
; CHECK: v_mov_b32_e32 v[[ADDR_HI:[0-9]+]], s{{[0-9]+}}
; CHECK: flat_load_dwordx2 [[A_ADDR:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[ADDR_LO]]:[[ADDR_HI]]{{\]}}
; CHECK: flat_load_dword [[VVAL:v[0-9]+]], [[A_ADDR]]
; CHECK: flat_store_dword v[{{[0-9]+:[0-9]+}}], [[VVAL]]
define amdgpu_kernel void @global_array_alias_store(i32 addrspace(1)* nocapture %out, i32 %n) {
entry:
  ; %gep may alias @A's pointee, so the load below must not be scalarized
  %gep = getelementptr i32, i32 addrspace(1)* %out, i32 %n
  store i32 12, i32 addrspace(1)* %gep
  %0 = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(1)* @A, align 4
  %1 = load i32, i32 addrspace(1)* %0, align 4
  store i32 %1, i32 addrspace(1)* %out, align 4
  ret void
}


declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #1 = { nounwind readnone }

!8 = !{!9, !9, i64 0}
!9 = !{!"float", !10, i64 0}
!10 = !{!"omnipotent char", !11, i64 0}
!11 = !{!"Simple C/C++ TBAA"}