; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -show-mc-encoding < %s | FileCheck --check-prefixes=SI,GCN,SICIVI,SICI,SIVIGFX9_10 %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -show-mc-encoding < %s | FileCheck --check-prefixes=CI,GCN,SICIVI,SICI %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -show-mc-encoding < %s | FileCheck --check-prefixes=VI,GCN,SICIVI,VIGFX9_10,SIVIGFX9_10 %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -show-mc-encoding < %s | FileCheck --check-prefixes=GFX9_10,GCN,VIGFX9_10,SIVIGFX9_10 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs -show-mc-encoding < %s | FileCheck --check-prefixes=GFX10,GFX9_10,GCN,VIGFX9_10,SIVIGFX9_10 %s

; SMRD load with an immediate offset.
; GCN-LABEL: {{^}}smrd0:
; SICI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x1 ; encoding: [0x01
; VIGFX9_10: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4
define amdgpu_kernel void @smrd0(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 1
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with the largest possible immediate offset.
; GCN-LABEL: {{^}}smrd1:
; SICI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff,0x{{[0-9]+[137]}}
; VIGFX9_10: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3fc
define amdgpu_kernel void @smrd1(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 255
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
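
; Note on offset units, as exercised above: SI and CI encode the SMRD
; immediate offset in dword units (hence 0x1 and 0xff for byte offsets 4 and
; 1020), while VI and later encode it in bytes (0x4 and 0x3fc). SI's offset
; field is only 8 bits wide, so anything past 255 dwords has to be
; materialized in an SGPR, as the next test shows.
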
; SMRD load with an offset greater than the largest possible immediate.
; GCN-LABEL: {{^}}smrd2:
; SI: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
; VIGFX9_10: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x400
; GCN: s_endpgm
define amdgpu_kernel void @smrd2(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 256
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with a 64-bit offset
; GCN-LABEL: {{^}}smrd3:
; FIXME: There are too many copies here because we don't fold immediates
; through REG_SEQUENCE
; SI: s_load_dwordx2 s[{{[0-9]:[0-9]}}], s[{{[0-9]:[0-9]}}], 0x13 ; encoding: [0x13
; TODO: Add VI checks
; GCN: s_endpgm
define amdgpu_kernel void @smrd3(i32 addrspace(1)* %out, [8 x i32], i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 4294967296
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with the largest possible immediate offset on VI
; GCN-LABEL: {{^}}smrd4:
; SI: s_mov_b32 [[OFFSET:s[0-9]+]], 0xffffc
; SI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
; VI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xffffc
; GFX9_10: s_mov_b32 [[OFFSET:s[0-9]+]], 0xffffc
; GFX9_10: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
define amdgpu_kernel void @smrd4(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 262143
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; SMRD load with an offset greater than the largest possible immediate on VI
; GCN-LABEL: {{^}}smrd5:
; SIVIGFX9_10: s_mov_b32 [[OFFSET:s[0-9]+]], 0x100000
; SIVIGFX9_10: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
; GCN: s_endpgm
define amdgpu_kernel void @smrd5(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 262144
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; GFX9_10 can use a signed immediate byte offset
; GCN-LABEL: {{^}}smrd6:
; SICIVI: s_add_u32 s{{[0-9]}}, s{{[0-9]}}, -4
; SICIVI: s_load_dword s{{[0-9]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0
; GFX9_10: s_load_dword s{{[0-9]}}, s{{\[[0-9]+:[0-9]+\]}}, -0x4
define amdgpu_kernel void @smrd6(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 -1
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}

; Don't use a negative SGPR offset
; GCN-LABEL: {{^}}smrd7:
; GCN: s_add_u32 s{{[0-9]}}, s{{[0-9]}}, 0xffe00000
; SICIVI: s_load_dword s{{[0-9]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0
; GFX9_10: s_load_dword s{{[0-9]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0
define amdgpu_kernel void @smrd7(i32 addrspace(1)* %out, i32 addrspace(4)* %ptr) #0 {
entry:
  %tmp = getelementptr i32, i32 addrspace(4)* %ptr, i64 -524288
  %tmp1 = load i32, i32 addrspace(4)* %tmp
  store i32 %tmp1, i32 addrspace(1)* %out
  ret void
}
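
; On SI, an SMRD instruction must not read a resource SGPR too soon after a
; scalar ALU instruction writes it; the hazard recognizer fills the gap with
; wait states (s_nop 3 gives the required four) between the s_mov_b32 writes
; to s[0:3] and the s_buffer_load that consumes them, which the checks below
; verify.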
; GCN-LABEL: {{^}}smrd_hazard:
; GCN-DAG: s_mov_b32 s3, 3
; GCN-DAG: s_mov_b32 s2, 2
; GCN-DAG: s_mov_b32 s1, 1
; GCN-DAG: s_mov_b32 s0, 0
; SI-NEXT: s_nop 3
; GCN-NEXT: s_buffer_load_dword s0, s[0:3], 0x0
define amdgpu_ps float @smrd_hazard(<4 x i32> inreg %desc) #0 {
main_body:
  %d0 = insertelement <4 x i32> undef, i32 0, i32 0
  %d1 = insertelement <4 x i32> %d0, i32 1, i32 1
  %d2 = insertelement <4 x i32> %d1, i32 2, i32 2
  %d3 = insertelement <4 x i32> %d2, i32 3, i32 3
  %r = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %d3, i32 0, i32 0)
  ret float %r
}

; SMRD load using the load.const.v4i32 intrinsic with an immediate offset
; GCN-LABEL: {{^}}smrd_load_const0:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x4 ; encoding: [0x04
; VIGFX9_10: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x10
define amdgpu_ps void @smrd_load_const0(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 16, i32 0)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %tmp21, i1 true, i1 true) #0
  ret void
}

; SMRD load using the load.const.v4i32 intrinsic with the largest possible
; immediate offset.
; GCN-LABEL: {{^}}smrd_load_const1:
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff ; encoding: [0xff
; SICI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0xff glc ; encoding: [0xff
; VIGFX9_10: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]+}}], 0x3fc ;
; VIGFX9_10: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]+}}], 0x3fc glc ;
define amdgpu_ps void @smrd_load_const1(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 1020, i32 0)
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 1020, i32 1)
  %s.buffer.float = bitcast i32 %s.buffer to float
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %s.buffer.float, i1 true, i1 true) #0
  ret void
}
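
; The smrd_load_const* tests mirror smrd0-smrd5 for s_buffer_load; the same
; dword-vs-byte offset unit rules apply. The glc forms checked above come
; from passing cachepolicy=1 (bit 0 = glc) as the last operand of the
; intrinsic in smrd_load_const1.
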
; SMRD load using the load.const.v4i32 intrinsic with an offset greater than
; the largest possible immediate offset.
; GCN-LABEL: {{^}}smrd_load_const2:
; SI: s_movk_i32 s[[OFFSET:[0-9]]], 0x400
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], s[[OFFSET]] ; encoding: [0x0[[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x100
; VIGFX9_10: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]+}}], 0x400
; VIGFX9_10: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]+}}], 0x400
define amdgpu_ps void @smrd_load_const2(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 1024, i32 0)
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 1024, i32 0)
  %s.buffer.float = bitcast i32 %s.buffer to float
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %s.buffer.float, i1 true, i1 true) #0
  ret void
}

; SMRD load with the largest possible immediate offset on VI
; GCN-LABEL: {{^}}smrd_load_const3:
; SI: s_mov_b32 [[OFFSET:s[0-9]+]], 0xffffc
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; SI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x3ffff
; VIGFX9_10: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]+}}], 0xffffc
; VIGFX9_10: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]+}}], 0xffffc
define amdgpu_ps void @smrd_load_const3(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 1048572, i32 0)
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 1048572, i32 0)
  %s.buffer.float = bitcast i32 %s.buffer to float
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %s.buffer.float, i1 true, i1 true) #0
  ret void
}

; SMRD load with an offset greater than the largest possible immediate on VI
; GCN-LABEL: {{^}}smrd_load_const4:
; SIVIGFX9_10: s_mov_b32 [[OFFSET:s[0-9]+]], 0x100000
; SIVIGFX9_10: s_buffer_load_dword s{{[0-9]+}}, s[{{[0-9]:[0-9]+}}], [[OFFSET]]
; SIVIGFX9_10: s_buffer_load_dword s{{[0-9]+}}, s[{{[0-9]:[0-9]+}}], [[OFFSET]]
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
; CI: s_buffer_load_dword s{{[0-9]}}, s[{{[0-9]:[0-9]}}], 0x40000
; GCN: s_endpgm
define amdgpu_ps void @smrd_load_const4(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 1048576, i32 0)
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 1048576, i32 0)
  %s.buffer.float = bitcast i32 %s.buffer to float
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %s.buffer.float, i1 true, i1 true) #0
  ret void
}

; dwordx2 s.buffer.load
; GCN-LABEL: {{^}}s_buffer_load_dwordx2:
; VIGFX9_10: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x80
; SICI: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x20
define amdgpu_ps void @s_buffer_load_dwordx2(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in) #0 {
main_body:
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32> %tmp22, i32 128, i32 0)
  %s.buffer.0 = extractelement <2 x i32> %s.buffer, i32 0
  %s.buffer.0.float = bitcast i32 %s.buffer.0 to float
  %s.buffer.1 = extractelement <2 x i32> %s.buffer, i32 1
  %s.buffer.1.float = bitcast i32 %s.buffer.1 to float
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %s.buffer.0.float, float %s.buffer.1.float, float %s.buffer.0.float, float %s.buffer.1.float, i1 true, i1 true) #0
  ret void
}

; dwordx4 s.buffer.load
; GCN-LABEL: {{^}}s_buffer_load_dwordx4:
; VIGFX9_10: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x80
; SICI: s_buffer_load_dwordx4 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x20
define amdgpu_ps void @s_buffer_load_dwordx4(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in) #0 {
main_body:
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32> %tmp22, i32 128, i32 0)
  %s.buffer.0 = extractelement <4 x i32> %s.buffer, i32 0
  %s.buffer.0.float = bitcast i32 %s.buffer.0 to float
  %s.buffer.1 = extractelement <4 x i32> %s.buffer, i32 1
  %s.buffer.1.float = bitcast i32 %s.buffer.1 to float
  %s.buffer.2 = extractelement <4 x i32> %s.buffer, i32 2
  %s.buffer.2.float = bitcast i32 %s.buffer.2 to float
  %s.buffer.3 = extractelement <4 x i32> %s.buffer, i32 3
  %s.buffer.3.float = bitcast i32 %s.buffer.3 to float
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %s.buffer.0.float, float %s.buffer.1.float, float %s.buffer.2.float, float %s.buffer.3.float, i1 true, i1 true) #0
  ret void
}

; dwordx8 s.buffer.load
; GCN-LABEL: {{^}}s_buffer_load_dwordx8:
; VIGFX9_10: s_buffer_load_dwordx8 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x80
; SICI: s_buffer_load_dwordx8 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x20
define amdgpu_ps void @s_buffer_load_dwordx8(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in) #0 {
main_body:
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call <8 x i32> @llvm.amdgcn.s.buffer.load.v8i32(<4 x i32> %tmp22, i32 128, i32 0)
  %s.buffer.0 = extractelement <8 x i32> %s.buffer, i32 0
  %s.buffer.0.float = bitcast i32 %s.buffer.0 to float
  %s.buffer.1 = extractelement <8 x i32> %s.buffer, i32 2
  %s.buffer.1.float = bitcast i32 %s.buffer.1 to float
  %s.buffer.2 = extractelement <8 x i32> %s.buffer, i32 5
  %s.buffer.2.float = bitcast i32 %s.buffer.2 to float
  %s.buffer.3 = extractelement <8 x i32> %s.buffer, i32 7
  %s.buffer.3.float = bitcast i32 %s.buffer.3 to float
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %s.buffer.0.float, float %s.buffer.1.float, float %s.buffer.2.float, float %s.buffer.3.float, i1 true, i1 true) #0
  ret void
}

; dwordx8 s.buffer.load with a v8f32 result
; GCN-LABEL: {{^}}s_buffer_load_dwordx8_v8f32:
; VIGFX9_10: s_buffer_load_dwordx8 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x80
; SICI: s_buffer_load_dwordx8 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x20
define amdgpu_ps void @s_buffer_load_dwordx8_v8f32(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in) #0 {
main_body:
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call <8 x float> @llvm.amdgcn.s.buffer.load.v8f32(<4 x i32> %tmp22, i32 128, i32 0)
  %s.buffer.0 = extractelement <8 x float> %s.buffer, i32 0
  %s.buffer.1 = extractelement <8 x float> %s.buffer, i32 2
  %s.buffer.2 = extractelement <8 x float> %s.buffer, i32 5
  %s.buffer.3 = extractelement <8 x float> %s.buffer, i32 7
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %s.buffer.0, float %s.buffer.1, float %s.buffer.2, float %s.buffer.3, i1 true, i1 true) #0
  ret void
}

; dwordx16 s.buffer.load
; GCN-LABEL: {{^}}s_buffer_load_dwordx16:
; VIGFX9_10: s_buffer_load_dwordx16 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x80
; SICI: s_buffer_load_dwordx16 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x20
define amdgpu_ps void @s_buffer_load_dwordx16(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in) #0 {
main_body:
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call <16 x i32> @llvm.amdgcn.s.buffer.load.v16i32(<4 x i32> %tmp22, i32 128, i32 0)
  %s.buffer.0 = extractelement <16 x i32> %s.buffer, i32 0
  %s.buffer.0.float = bitcast i32 %s.buffer.0 to float
  %s.buffer.1 = extractelement <16 x i32> %s.buffer, i32 3
  %s.buffer.1.float = bitcast i32 %s.buffer.1 to float
  %s.buffer.2 = extractelement <16 x i32> %s.buffer, i32 12
  %s.buffer.2.float = bitcast i32 %s.buffer.2 to float
  %s.buffer.3 = extractelement <16 x i32> %s.buffer, i32 15
  %s.buffer.3.float = bitcast i32 %s.buffer.3 to float
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %s.buffer.0.float, float %s.buffer.1.float, float %s.buffer.2.float, float %s.buffer.3.float, i1 true, i1 true) #0
  ret void
}

; GCN-LABEL: {{^}}s_buffer_load_dwordx16_v16f32:
; VIGFX9_10: s_buffer_load_dwordx16 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x80
; SICI: s_buffer_load_dwordx16 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]:[0-9]}}], 0x20
define amdgpu_ps void @s_buffer_load_dwordx16_v16f32(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in) #0 {
main_body:
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call <16 x float> @llvm.amdgcn.s.buffer.load.v16f32(<4 x i32> %tmp22, i32 128, i32 0)
  %s.buffer.0 = extractelement <16 x float> %s.buffer, i32 0
  %s.buffer.1 = extractelement <16 x float> %s.buffer, i32 3
  %s.buffer.2 = extractelement <16 x float> %s.buffer, i32 12
  %s.buffer.3 = extractelement <16 x float> %s.buffer, i32 15
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %s.buffer.0, float %s.buffer.1, float %s.buffer.2, float %s.buffer.3, i1 true, i1 true) #0
  ret void
}

; GCN-LABEL: {{^}}smrd_sgpr_offset:
; GCN: s_buffer_load_dword s{{[0-9]}}, s[0:3], s4
define amdgpu_ps float @smrd_sgpr_offset(<4 x i32> inreg %desc, i32 inreg %offset) #0 {
main_body:
  %r = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %offset, i32 0)
  ret float %r
}

; GCN-LABEL: {{^}}smrd_vgpr_offset:
; GCN: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen ;
define amdgpu_ps float @smrd_vgpr_offset(<4 x i32> inreg %desc, i32 %offset) #0 {
main_body:
  %r = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %offset, i32 0)
  ret float %r
}
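
; A divergent (VGPR) offset cannot feed a scalar load, so the smrd_vgpr_offset*
; tests lower to MUBUF buffer_load with the offen modifier. The MUBUF immediate
; offset field is 12 bits (0..4095), which is why an add of 4092 below still
; folds into the instruction while 4096 does not.
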
; GCN-LABEL: {{^}}smrd_vgpr_offset_imm:
; GCN-NEXT: %bb.
; GCN-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen offset:4092 ;
define amdgpu_ps float @smrd_vgpr_offset_imm(<4 x i32> inreg %desc, i32 %offset) #0 {
main_body:
  %off = add i32 %offset, 4092
  %r = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %off, i32 0)
  ret float %r
}

; GCN-LABEL: {{^}}smrd_vgpr_offset_imm_too_large:
; GCN-NEXT: %bb.
; SICI-NEXT: v_add_{{i|u}}32_e32 v0, {{(vcc, )?}}0x1000, v0
; SICI-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 0 offen ;
; VIGFX9_10-NEXT: buffer_load_dword v{{[0-9]}}, v0, s[0:3], 4 offen offset:4092 ;
define amdgpu_ps float @smrd_vgpr_offset_imm_too_large(<4 x i32> inreg %desc, i32 %offset) #0 {
main_body:
  %off = add i32 %offset, 4096
  %r = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %off, i32 0)
  ret float %r
}

; GCN-LABEL: {{^}}smrd_imm_merged:
; GCN-NEXT: %bb.
; SICI-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x1
; SICI-NEXT: s_buffer_load_dwordx2 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x7
; GFX10-NEXT: s_clause
; VIGFX9_10-NEXT: s_buffer_load_dwordx4 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x4
; VIGFX9_10-NEXT: s_buffer_load_dwordx2 s[{{[0-9]}}:{{[0-9]}}], s[0:3], 0x1c
define amdgpu_ps void @smrd_imm_merged(<4 x i32> inreg %desc) #0 {
main_body:
  %r1 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 4, i32 0)
  %r2 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 8, i32 0)
  %r3 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 12, i32 0)
  %r4 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 16, i32 0)
  %r5 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 28, i32 0)
  %r6 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 32, i32 0)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r1, float %r2, float %r3, float %r4, i1 true, i1 true) #0
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r5, float %r6, float undef, float undef, i1 true, i1 true) #0
  ret void
}
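
; Adjacent dword loads are merged into wider instructions where possible: the
; six loads above become one dwordx4 plus one dwordx2, and on GFX10 the
; back-to-back scalar loads additionally sit under an s_clause. The next test
; exercises merging in the presence of m0 writes for interpolation.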
; GCN-LABEL: {{^}}smrd_imm_merge_m0:
;
; GCN: s_buffer_load_dwordx2
; SICIVI: s_mov_b32 m0
; SICIVI-DAG: v_interp_p1_f32
; SICIVI-DAG: v_interp_p1_f32
; SICIVI-DAG: v_interp_p1_f32
; SICIVI-DAG: v_interp_p2_f32
; SICIVI-DAG: v_interp_p2_f32
; SICIVI-DAG: v_interp_p2_f32
;
; extractelement does not result in movrels anymore for vectors fitting in 8 dwords
; SICIVI-NOT: s_mov_b32 m0
; SICIVI-NOT: v_movrels_b32_e32
; v_cndmask_b32_e32
; v_cndmask_b32_e32
;
; Merging is still thwarted on GFX9 due to s_set_gpr_idx
;
define amdgpu_ps float @smrd_imm_merge_m0(<4 x i32> inreg %desc, i32 inreg %prim, float %u, float %v) #0 {
main_body:
  %idx1.f = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 0, i32 0)
  %idx1 = bitcast float %idx1.f to i32

  %v0.x1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 0, i32 0, i32 %prim)
  %v0.x = call nsz float @llvm.amdgcn.interp.p2(float %v0.x1, float %v, i32 0, i32 0, i32 %prim)
  %v0.y1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 0, i32 1, i32 %prim)
  %v0.y = call nsz float @llvm.amdgcn.interp.p2(float %v0.y1, float %v, i32 0, i32 1, i32 %prim)
  %v0.z1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 0, i32 2, i32 %prim)
  %v0.z = call nsz float @llvm.amdgcn.interp.p2(float %v0.z1, float %v, i32 0, i32 2, i32 %prim)
  %v0.tmp0 = insertelement <3 x float> undef, float %v0.x, i32 0
  %v0.tmp1 = insertelement <3 x float> %v0.tmp0, float %v0.y, i32 1
  %v0 = insertelement <3 x float> %v0.tmp1, float %v0.z, i32 2
  %a = extractelement <3 x float> %v0, i32 %idx1

  %v1.x1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 1, i32 0, i32 %prim)
  %v1.x = call nsz float @llvm.amdgcn.interp.p2(float %v1.x1, float %v, i32 1, i32 0, i32 %prim)
  %v1.y1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 1, i32 1, i32 %prim)
  %v1.y = call nsz float @llvm.amdgcn.interp.p2(float %v1.y1, float %v, i32 1, i32 1, i32 %prim)
  %v1.z1 = call nsz float @llvm.amdgcn.interp.p1(float %u, i32 1, i32 2, i32 %prim)
  %v1.z = call nsz float @llvm.amdgcn.interp.p2(float %v1.z1, float %v, i32 1, i32 2, i32 %prim)
  %v1.tmp0 = insertelement <3 x float> undef, float %v0.x, i32 0
  %v1.tmp1 = insertelement <3 x float> %v0.tmp0, float %v0.y, i32 1
  %v1 = insertelement <3 x float> %v0.tmp1, float %v0.z, i32 2

  %b = extractelement <3 x float> %v1, i32 %idx1
  %c = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 4, i32 0)

  %res.tmp = fadd float %a, %b
  %res = fadd float %res.tmp, %c
  ret float %res
}

; GCN-LABEL: {{^}}smrd_vgpr_merged:
; GCN-NEXT: %bb.
; GFX10-NEXT: s_clause
; GCN-NEXT: buffer_load_dwordx4 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:4
; GCN-NEXT: buffer_load_dwordx2 v[{{[0-9]}}:{{[0-9]}}], v0, s[0:3], 0 offen offset:28
define amdgpu_ps void @smrd_vgpr_merged(<4 x i32> inreg %desc, i32 %a) #0 {
main_body:
  %a1 = add i32 %a, 4
  %a2 = add i32 %a, 8
  %a3 = add i32 %a, 12
  %a4 = add i32 %a, 16
  %a5 = add i32 %a, 28
  %a6 = add i32 %a, 32
  %r1 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a1, i32 0)
  %r2 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a2, i32 0)
  %r3 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a3, i32 0)
  %r4 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a4, i32 0)
  %r5 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a5, i32 0)
  %r6 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %a6, i32 0)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r1, float %r2, float %r3, float %r4, i1 true, i1 true) #0
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r5, float %r6, float undef, float undef, i1 true, i1 true) #0
  ret void
}

; GCN-LABEL: {{^}}smrd_sgpr_descriptor_promoted
; GCN: v_readfirstlane
define amdgpu_cs void @smrd_sgpr_descriptor_promoted([0 x i8] addrspace(4)* inreg noalias dereferenceable(18446744073709551615), i32) #0 {
main_body:
  %descptr = bitcast [0 x i8] addrspace(4)* %0 to <4 x i32> addrspace(4)*, !amdgpu.uniform !0
  br label %.outer_loop_header

ret_block:                                       ; preds = %.outer, %.label22, %main_body
  ret void

.outer_loop_header:
  br label %.inner_loop_header

.inner_loop_header:                              ; preds = %.inner_loop_body, %.outer_loop_header
  %loopctr.1 = phi i32 [ 0, %.outer_loop_header ], [ %loopctr.2, %.inner_loop_body ]
  %loopctr.2 = add i32 %loopctr.1, 1
  %inner_br1 = icmp slt i32 %loopctr.2, 10
  br i1 %inner_br1, label %.inner_loop_body, label %ret_block

.inner_loop_body:
  %descriptor = load <4 x i32>, <4 x i32> addrspace(4)* %descptr, align 16, !invariant.load !0
  %load1result = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %descriptor, i32 0, i32 0)
  store float %load1result, float addrspace(1)* undef
  %inner_br2 = icmp uge i32 %1, 10
  br i1 %inner_br2, label %.inner_loop_header, label %.outer_loop_body

.outer_loop_body:
  %offset = shl i32 %loopctr.2, 6
  %load2result = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %descriptor, i32 %offset, i32 0)
  %outer_br = fcmp ueq float %load2result, 0x0
  br i1 %outer_br, label %.outer_loop_header, label %ret_block
}

; SMRD load with a non-const offset
; GCN-LABEL: {{^}}smrd_load_nonconst0:
; SIVIGFX9_10: s_buffer_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
; SIVIGFX9_10: s_buffer_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
; CI: s_buffer_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
; CI: s_buffer_load_dword s{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
; GCN: s_endpgm
define amdgpu_ps void @smrd_load_nonconst0(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in, i32 inreg %ncoff) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 %ncoff, i32 0)
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 %ncoff, i32 0)
  %s.buffer.float = bitcast i32 %s.buffer to float
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %s.buffer.float, i1 true, i1 true) #0
  ret void
}

; SMRD load with a non-const non-uniform offset
; GCN-LABEL: {{^}}smrd_load_nonconst1:
; SIVIGFX9_10: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
; SIVIGFX9_10: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
; CI: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
; CI: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
; GCN: s_endpgm
define amdgpu_ps void @smrd_load_nonconst1(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in, i32 %ncoff) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 %ncoff, i32 0)
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> %tmp22, i32 %ncoff, i32 0)
  %s.buffer.float = bitcast i32 %s.buffer to float
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %s.buffer.float, i1 true, i1 true) #0
  ret void
}

; SMRD load with a non-const non-uniform offset of > 4 dwords (requires splitting)
; GCN-LABEL: {{^}}smrd_load_nonconst2:
; SIVIGFX9_10-DAG: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
; SIVIGFX9_10-DAG: buffer_load_dwordx4 v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
; CI: buffer_load_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
; CI: buffer_load_dwordx4 v[{{[0-9]+:[0-9]+}}], v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], 0 offen
; GCN: s_endpgm
define amdgpu_ps void @smrd_load_nonconst2(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in, i32 %ncoff) #0 {
main_body:
  %tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
  %tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp
  %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 %ncoff, i32 0)
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call <8 x i32> @llvm.amdgcn.s.buffer.load.v8i32(<4 x i32> %tmp22, i32 %ncoff, i32 0)
  %s.buffer.elt = extractelement <8 x i32> %s.buffer, i32 1
  %s.buffer.float = bitcast i32 %s.buffer.elt to float
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %tmp21, float %tmp21, float %tmp21, float %s.buffer.float, i1 true, i1 true) #0
  ret void
}

; SMRD load with a non-const non-uniform offset of > 4 dwords (requires splitting)
; GCN-LABEL: {{^}}smrd_load_nonconst3:
; GCN-DAG: buffer_load_dwordx4 v[0:3], v{{[0-9]+}}, s[0:3], 0 offen ;
; GCN-DAG: buffer_load_dwordx4 v[4:7], v{{[0-9]+}}, s[0:3], 0 offen offset:16 ;
; GCN-DAG: buffer_load_dwordx4 v[8:11], v{{[0-9]+}}, s[0:3], 0 offen offset:32 ;
; GCN-DAG: buffer_load_dwordx4 v[12:15], v{{[0-9]+}}, s[0:3], 0 offen offset:48 ;
; GCN: ; return to shader part epilog
define amdgpu_ps <16 x float> @smrd_load_nonconst3(<4 x i32> inreg %rsrc, i32 %off) #0 {
main_body:
  %ld = call <16 x i32> @llvm.amdgcn.s.buffer.load.v16i32(<4 x i32> %rsrc, i32 %off, i32 0)
  %bc = bitcast <16 x i32> %ld to <16 x float>
  ret <16 x float> %bc
}

; GCN-LABEL: {{^}}smrd_load_nonconst4:
; SICI: v_add_i32_e32 v{{[0-9]+}}, vcc, 0xff8, v0 ;
; SICI-DAG: buffer_load_dwordx4 v[0:3], v{{[0-9]+}}, s[0:3], 0 offen ;
; SICI-DAG: buffer_load_dwordx4 v[4:7], v{{[0-9]+}}, s[0:3], 0 offen offset:16 ;
; SICI-DAG: buffer_load_dwordx4 v[8:11], v{{[0-9]+}}, s[0:3], 0 offen offset:32 ;
; SICI-DAG: buffer_load_dwordx4 v[12:15], v{{[0-9]+}}, s[0:3], 0 offen offset:48 ;
; VIGFX9_10-DAG: buffer_load_dwordx4 v[0:3], v{{[0-9]+}}, s[0:3], 56 offen offset:4032 ;
; VIGFX9_10-DAG: buffer_load_dwordx4 v[4:7], v{{[0-9]+}}, s[0:3], 56 offen offset:4048 ;
; VIGFX9_10-DAG: buffer_load_dwordx4 v[8:11], v{{[0-9]+}}, s[0:3], 56 offen offset:4064 ;
; VIGFX9_10-DAG: buffer_load_dwordx4 v[12:15], v{{[0-9]+}}, s[0:3], 56 offen offset:4080 ;
; GCN: ; return to shader part epilog
define amdgpu_ps <16 x float> @smrd_load_nonconst4(<4 x i32> inreg %rsrc, i32 %off) #0 {
main_body:
  %off.2 = add i32 %off, 4088
  %ld = call <16 x i32> @llvm.amdgcn.s.buffer.load.v16i32(<4 x i32> %rsrc, i32 %off.2, i32 0)
  %bc = bitcast <16 x i32> %ld to <16 x float>
  ret <16 x float> %bc
}

; GCN-LABEL: {{^}}smrd_load_nonconst5:
; SICI: v_add_i32_e32 v{{[0-9]+}}, vcc, 0x1004, v0
; SICI-DAG: buffer_load_dwordx4 v[0:3], v{{[0-9]+}}, s[0:3], 0 offen ;
; SICI-DAG: buffer_load_dwordx4 v[4:7], v{{[0-9]+}}, s[0:3], 0 offen offset:16 ;
; SICI-DAG: buffer_load_dwordx4 v[8:11], v{{[0-9]+}}, s[0:3], 0 offen offset:32 ;
; SICI-DAG: buffer_load_dwordx4 v[12:15], v{{[0-9]+}}, s[0:3], 0 offen offset:48 ;
; VIGFX9_10: s_movk_i32 s4, 0xfc0
; VIGFX9_10-DAG: buffer_load_dwordx4 v[0:3], v{{[0-9]+}}, s[0:3], s4 offen offset:68 ;
; VIGFX9_10-DAG: buffer_load_dwordx4 v[4:7], v{{[0-9]+}}, s[0:3], s4 offen offset:84 ;
; VIGFX9_10-DAG: buffer_load_dwordx4 v[8:11], v{{[0-9]+}}, s[0:3], s4 offen offset:100 ;
; VIGFX9_10-DAG: buffer_load_dwordx4 v[12:15], v{{[0-9]+}}, s[0:3], s4 offen offset:116 ;
; GCN: ; return to shader part epilog
define amdgpu_ps <16 x float> @smrd_load_nonconst5(<4 x i32> inreg %rsrc, i32 %off) #0 {
main_body:
  %off.2 = add i32 %off, 4100
  %ld = call <16 x i32> @llvm.amdgcn.s.buffer.load.v16i32(<4 x i32> %rsrc, i32 %off.2, i32 0)
  %bc = bitcast <16 x i32> %ld to <16 x float>
  ret <16 x float> %bc
}

; SMRD load dwordx2
; GCN-LABEL: {{^}}smrd_load_dwordx2:
; SIVIGFX9_10: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
; CI: s_buffer_load_dwordx2 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
; GCN: s_endpgm
define amdgpu_ps void @smrd_load_dwordx2(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <32 x i8> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, <4 x i32> addrspace(4)* inreg %in, i32 inreg %ncoff) #0 {
main_body:
  %tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %in
  %s.buffer = call <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32> %tmp22, i32 %ncoff, i32 0)
  %s.buffer.float = bitcast <2 x i32> %s.buffer to <2 x float>
  %r.1 = extractelement <2 x float> %s.buffer.float, i32 0
  %r.2 = extractelement <2 x float> %s.buffer.float, i32 1
  call void @llvm.amdgcn.exp.f32(i32 0, i32 15, float %r.1, float %r.1, float %r.1, float %r.2, i1 true, i1 true) #0
  ret void
}

; GCN-LABEL: {{^}}smrd_uniform_loop:
;
; TODO: we should keep the loop counter in an SGPR
;
; GCN: s_buffer_load_dword
define amdgpu_ps float @smrd_uniform_loop(<4 x i32> inreg %desc, i32 %bound) #0 {
main_body:
  br label %loop

loop:
  %counter = phi i32 [ 0, %main_body ], [ %counter.next, %loop ]
  %sum = phi float [ 0.0, %main_body ], [ %sum.next, %loop ]
  %offset = shl i32 %counter, 2
  %v = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %offset, i32 0)
  %sum.next = fadd float %sum, %v
  %counter.next = add i32 %counter, 1
  %cc = icmp uge i32 %counter.next, %bound
  br i1 %cc, label %exit, label %loop

exit:
  ret float %sum.next
}

; GCN-LABEL: {{^}}smrd_uniform_loop2:
; (this test differs from smrd_uniform_loop by the more complex structure of phis,
; which used to confuse the DivergenceAnalysis after structurization)
;
; TODO: we should keep the loop counter in an SGPR and use an S_BUFFER_LOAD
;
; GCN: buffer_load_dword
define amdgpu_ps float @smrd_uniform_loop2(<4 x i32> inreg %desc, i32 %bound, i32 %bound.a) #0 {
main_body:
  br label %loop

loop:
  %counter = phi i32 [ 0, %main_body ], [ %counter.next, %loop.a ], [ %counter.next, %loop.b ]
  %sum = phi float [ 0.0, %main_body ], [ %sum.next, %loop.a ], [ %sum.next.b, %loop.b ]
  %offset = shl i32 %counter, 2
  %v = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %desc, i32 %offset, i32 0)
  %sum.next = fadd float %sum, %v
  %counter.next = add i32 %counter, 1
  %cc = icmp uge i32 %counter.next, %bound
  br i1 %cc, label %exit, label %loop.a

loop.a:
  %cc.a = icmp uge i32 %counter.next, %bound.a
  br i1 %cc, label %loop, label %loop.b

loop.b:
  %sum.next.b = fadd float %sum.next, 1.0
  br label %loop

exit:
  ret float %sum.next
}

; This test checks that the load after some control flow with an offset based
; on a divergent shader input is correctly recognized as divergent. This was
; reduced from an actual regression. Yes, the %unused argument matters, as
; well as the fact that %arg4 is a vector.
;
; GCN-LABEL: {{^}}arg_divergence:
; GCN: buffer_load_dword v0, v0,
; GCN-NEXT: s_waitcnt
; GCN-NEXT: ; return to shader part epilog
define amdgpu_cs float @arg_divergence(i32 inreg %unused, <3 x i32> %arg4) #0 {
main_body:
  br i1 undef, label %if1, label %endif1

if1:                                             ; preds = %main_body
  store i32 0, i32 addrspace(3)* undef, align 4
  br label %endif1

endif1:                                          ; preds = %if1, %main_body
  %tmp13 = extractelement <3 x i32> %arg4, i32 0
  %tmp97 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> undef, i32 %tmp13, i32 0)
  ret float %tmp97
}

; GCN-LABEL: {{^}}s_buffer_load_f32:
; GCN: s_buffer_load_dword s0, s[0:3], s4
define amdgpu_ps void @s_buffer_load_f32(<4 x i32> inreg %rsrc, i32 inreg %offset) {
  %sgpr = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %rsrc, i32 %offset, i32 0)
  call void asm sideeffect "; use $0", "s"(float %sgpr)
  ret void
}

; GCN-LABEL: {{^}}s_buffer_load_v2f32:
; GCN: s_buffer_load_dwordx2 s[0:1], s[0:3], s4
define amdgpu_ps void @s_buffer_load_v2f32(<4 x i32> inreg %rsrc, i32 inreg %offset) {
  %sgpr = call <2 x float> @llvm.amdgcn.s.buffer.load.v2f32(<4 x i32> %rsrc, i32 %offset, i32 0)
  call void asm sideeffect "; use $0", "s"(<2 x float> %sgpr)
  ret void
}

; GCN-LABEL: {{^}}s_buffer_load_v4f32:
; GCN: s_buffer_load_dwordx4 s[0:3], s[0:3], s4
define amdgpu_ps void @s_buffer_load_v4f32(<4 x i32> inreg %rsrc, i32 inreg %offset) {
  %sgpr = call <4 x float> @llvm.amdgcn.s.buffer.load.v4f32(<4 x i32> %rsrc, i32 %offset, i32 0)
  call void asm sideeffect "; use $0", "s"(<4 x float> %sgpr)
  ret void
}

; GCN-LABEL: {{^}}s_buffer_load_v8f32:
; GCN: s_buffer_load_dwordx8 s[0:7], s[0:3], s4
define amdgpu_ps void @s_buffer_load_v8f32(<4 x i32> inreg %rsrc, i32 inreg %offset) {
  %sgpr = call <8 x float> @llvm.amdgcn.s.buffer.load.v8f32(<4 x i32> %rsrc, i32 %offset, i32 0)
  call void asm sideeffect "; use $0", "s"(<8 x float> %sgpr)
  ret void
}

; GCN-LABEL: {{^}}s_buffer_load_v16f32:
; GCN: s_buffer_load_dwordx16 s[0:15], s[0:3], s4
define amdgpu_ps void @s_buffer_load_v16f32(<4 x i32> inreg %rsrc, i32 inreg %offset) {
  %sgpr = call <16 x float> @llvm.amdgcn.s.buffer.load.v16f32(<4 x i32> %rsrc, i32 %offset, i32 0)
  call void asm sideeffect "; use $0", "s"(<16 x float> %sgpr)
  ret void
}
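
; With a uniform SGPR offset, even the widest result types stay on the scalar
; unit: the s_buffer_load_v* tests above expect a single
; s_buffer_load_dwordx2/x4/x8/x16 whose result feeds the "s"-constrained
; inline asm directly.
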
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0
declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32) #2
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32) #2

declare i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32>, i32, i32) #1
declare <2 x i32> @llvm.amdgcn.s.buffer.load.v2i32(<4 x i32>, i32, i32)
declare <4 x i32> @llvm.amdgcn.s.buffer.load.v4i32(<4 x i32>, i32, i32)
declare <8 x i32> @llvm.amdgcn.s.buffer.load.v8i32(<4 x i32>, i32, i32)
declare <16 x i32> @llvm.amdgcn.s.buffer.load.v16i32(<4 x i32>, i32, i32)

declare float @llvm.amdgcn.s.buffer.load.f32(<4 x i32>, i32, i32)
declare <2 x float> @llvm.amdgcn.s.buffer.load.v2f32(<4 x i32>, i32, i32)
declare <4 x float> @llvm.amdgcn.s.buffer.load.v4f32(<4 x i32>, i32, i32)
declare <8 x float> @llvm.amdgcn.s.buffer.load.v8f32(<4 x i32>, i32, i32)
declare <16 x float> @llvm.amdgcn.s.buffer.load.v16f32(<4 x i32>, i32, i32)

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind readnone speculatable }

!0 = !{}