; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx902 -verify-machineinstrs -amdgpu-enable-global-sgpr-addr < %s | FileCheck -check-prefix=GCN %s

define amdgpu_kernel void @vector_clause(<4 x i32> addrspace(1)* noalias nocapture readonly %arg, <4 x i32> addrspace(1)* noalias nocapture %arg1) {
; GCN-LABEL: vector_clause:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x2c
; GCN-NEXT:    v_mov_b32_e32 v17, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v16, 4, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    global_load_dwordx4 v[0:3], v[16:17], s[2:3]
; GCN-NEXT:    global_load_dwordx4 v[4:7], v[16:17], s[2:3] offset:16
; GCN-NEXT:    global_load_dwordx4 v[8:11], v[16:17], s[2:3] offset:32
; GCN-NEXT:    global_load_dwordx4 v[12:15], v[16:17], s[2:3] offset:48
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    s_waitcnt vmcnt(3)
; GCN-NEXT:    global_store_dwordx4 v[16:17], v[0:3], s[4:5]
; GCN-NEXT:    s_waitcnt vmcnt(3)
; GCN-NEXT:    global_store_dwordx4 v[16:17], v[4:7], s[4:5] offset:16
; GCN-NEXT:    s_waitcnt vmcnt(3)
; GCN-NEXT:    global_store_dwordx4 v[16:17], v[8:11], s[4:5] offset:32
; GCN-NEXT:    s_waitcnt vmcnt(3)
; GCN-NEXT:    global_store_dwordx4 v[16:17], v[12:15], s[4:5] offset:48
; GCN-NEXT:    s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = zext i32 %tmp to i64
  %tmp3 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp2
  %tmp4 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp3, align 16
  %tmp5 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp2
  %tmp6 = add nuw nsw i64 %tmp2, 1
  %tmp7 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp6
  %tmp8 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp7, align 16
  %tmp9 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp6
  %tmp10 = add nuw nsw i64 %tmp2, 2
  %tmp11 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp10
  %tmp12 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp11, align 16
  %tmp13 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp10
  %tmp14 = add nuw nsw i64 %tmp2, 3
  %tmp15 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 %tmp14
  %tmp16 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp15, align 16
  %tmp17 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 %tmp14
  store <4 x i32> %tmp4, <4 x i32> addrspace(1)* %tmp5, align 16
  store <4 x i32> %tmp8, <4 x i32> addrspace(1)* %tmp9, align 16
  store <4 x i32> %tmp12, <4 x i32> addrspace(1)* %tmp13, align 16
  store <4 x i32> %tmp16, <4 x i32> addrspace(1)* %tmp17, align 16
  ret void
}

define amdgpu_kernel void @scalar_clause(<4 x i32> addrspace(1)* noalias nocapture readonly %arg, <4 x i32> addrspace(1)* noalias nocapture %arg1) {
; GCN-LABEL: scalar_clause:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[16:17], s[0:1], 0x24
; GCN-NEXT:    s_load_dwordx2 s[18:19], s[0:1], 0x2c
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[16:17], 0x0
; GCN-NEXT:    s_load_dwordx4 s[4:7], s[16:17], 0x10
; GCN-NEXT:    s_load_dwordx4 s[8:11], s[16:17], 0x20
; GCN-NEXT:    s_load_dwordx4 s[12:15], s[16:17], 0x30
; GCN-NEXT:    v_mov_b32_e32 v12, s18
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    v_mov_b32_e32 v4, s4
; GCN-NEXT:    v_mov_b32_e32 v8, s8
; GCN-NEXT:    v_mov_b32_e32 v13, s19
; GCN-NEXT:    v_mov_b32_e32 v1, s1
; GCN-NEXT:    v_mov_b32_e32 v2, s2
; GCN-NEXT:    v_mov_b32_e32 v3, s3
; GCN-NEXT:    v_mov_b32_e32 v5, s5
; GCN-NEXT:    v_mov_b32_e32 v6, s6
; GCN-NEXT:    v_mov_b32_e32 v7, s7
; GCN-NEXT:    global_store_dwordx4 v[12:13], v[0:3], off
; GCN-NEXT:    global_store_dwordx4 v[12:13], v[4:7], off offset:16
; GCN-NEXT:    v_mov_b32_e32 v0, s12
; GCN-NEXT:    v_mov_b32_e32 v9, s9
; GCN-NEXT:    v_mov_b32_e32 v10, s10
; GCN-NEXT:    v_mov_b32_e32 v11, s11
; GCN-NEXT:    v_mov_b32_e32 v1, s13
; GCN-NEXT:    v_mov_b32_e32 v2, s14
; GCN-NEXT:    v_mov_b32_e32 v3, s15
; GCN-NEXT:    global_store_dwordx4 v[12:13], v[8:11], off offset:32
; GCN-NEXT:    global_store_dwordx4 v[12:13], v[0:3], off offset:48
; GCN-NEXT:    s_endpgm
bb:
  %tmp = load <4 x i32>, <4 x i32> addrspace(1)* %arg, align 16
  %tmp2 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 1
  %tmp3 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp2, align 16
  %tmp4 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 1
  %tmp5 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 2
  %tmp6 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp5, align 16
  %tmp7 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 2
  %tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg, i64 3
  %tmp9 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp8, align 16
  %tmp10 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg1, i64 3
  store <4 x i32> %tmp, <4 x i32> addrspace(1)* %arg1, align 16
  store <4 x i32> %tmp3, <4 x i32> addrspace(1)* %tmp4, align 16
  store <4 x i32> %tmp6, <4 x i32> addrspace(1)* %tmp7, align 16
  store <4 x i32> %tmp9, <4 x i32> addrspace(1)* %tmp10, align 16
  ret void
}

define void @mubuf_clause(<4 x i32> addrspace(5)* noalias nocapture readonly %arg, <4 x i32> addrspace(5)* noalias nocapture %arg1) {
; GCN-LABEL: mubuf_clause:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_and_b32_e32 v2, 0x3ff, v2
; GCN-NEXT:    v_lshlrev_b32_e32 v2, 4, v2
; GCN-NEXT:    v_add_u32_e32 v0, v0, v2
; GCN-NEXT:    v_add_u32_e32 v1, v1, v2
; GCN-NEXT:    buffer_load_dword v6, v0, s[0:3], 0 offen offset:20
; GCN-NEXT:    buffer_load_dword v7, v0, s[0:3], 0 offen offset:24
; GCN-NEXT:    buffer_load_dword v8, v0, s[0:3], 0 offen offset:28
; GCN-NEXT:    buffer_load_dword v9, v0, s[0:3], 0 offen offset:32
; GCN-NEXT:    buffer_load_dword v10, v0, s[0:3], 0 offen offset:36
; GCN-NEXT:    buffer_load_dword v11, v0, s[0:3], 0 offen offset:40
; GCN-NEXT:    buffer_load_dword v12, v0, s[0:3], 0 offen offset:44
; GCN-NEXT:    buffer_load_dword v13, v0, s[0:3], 0 offen offset:48
; GCN-NEXT:    buffer_load_dword v14, v0, s[0:3], 0 offen offset:52
; GCN-NEXT:    buffer_load_dword v15, v0, s[0:3], 0 offen offset:56
; GCN-NEXT:    buffer_load_dword v16, v0, s[0:3], 0 offen offset:60
; GCN-NEXT:    buffer_load_dword v2, v0, s[0:3], 0 offen
; GCN-NEXT:    buffer_load_dword v3, v0, s[0:3], 0 offen offset:4
; GCN-NEXT:    buffer_load_dword v4, v0, s[0:3], 0 offen offset:8
; GCN-NEXT:    buffer_load_dword v5, v0, s[0:3], 0 offen offset:12
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    buffer_load_dword v0, v0, s[0:3], 0 offen offset:16
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    s_waitcnt vmcnt(4)
; GCN-NEXT:    buffer_store_dword v2, v1, s[0:3], 0 offen
; GCN-NEXT:    s_waitcnt vmcnt(4)
; GCN-NEXT:    buffer_store_dword v3, v1, s[0:3], 0 offen offset:4
; GCN-NEXT:    s_waitcnt vmcnt(4)
; GCN-NEXT:    buffer_store_dword v4, v1, s[0:3], 0 offen offset:8
; GCN-NEXT:    s_waitcnt vmcnt(4)
; GCN-NEXT:    buffer_store_dword v5, v1, s[0:3], 0 offen offset:12
; GCN-NEXT:    s_waitcnt vmcnt(4)
; GCN-NEXT:    buffer_store_dword v0, v1, s[0:3], 0 offen offset:16
; GCN-NEXT:    buffer_store_dword v6, v1, s[0:3], 0 offen offset:20
; GCN-NEXT:    buffer_store_dword v7, v1, s[0:3], 0 offen offset:24
; GCN-NEXT:    buffer_store_dword v8, v1, s[0:3], 0 offen offset:28
; GCN-NEXT:    buffer_store_dword v9, v1, s[0:3], 0 offen offset:32
; GCN-NEXT:    buffer_store_dword v10, v1, s[0:3], 0 offen offset:36
; GCN-NEXT:    buffer_store_dword v11, v1, s[0:3], 0 offen offset:40
; GCN-NEXT:    buffer_store_dword v12, v1, s[0:3], 0 offen offset:44
; GCN-NEXT:    buffer_store_dword v13, v1, s[0:3], 0 offen offset:48
; GCN-NEXT:    buffer_store_dword v14, v1, s[0:3], 0 offen offset:52
; GCN-NEXT:    buffer_store_dword v15, v1, s[0:3], 0 offen offset:56
; GCN-NEXT:    buffer_store_dword v16, v1, s[0:3], 0 offen offset:60
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp2 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg, i32 %tmp
  %tmp3 = load <4 x i32>, <4 x i32> addrspace(5)* %tmp2, align 16
  %tmp4 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg1, i32 %tmp
  %tmp5 = add nuw nsw i32 %tmp, 1
  %tmp6 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg, i32 %tmp5
  %tmp7 = load <4 x i32>, <4 x i32> addrspace(5)* %tmp6, align 16
  %tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg1, i32 %tmp5
  %tmp9 = add nuw nsw i32 %tmp, 2
  %tmp10 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg, i32 %tmp9
  %tmp11 = load <4 x i32>, <4 x i32> addrspace(5)* %tmp10, align 16
  %tmp12 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg1, i32 %tmp9
  %tmp13 = add nuw nsw i32 %tmp, 3
  %tmp14 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg, i32 %tmp13
  %tmp15 = load <4 x i32>, <4 x i32> addrspace(5)* %tmp14, align 16
  %tmp16 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %arg1, i32 %tmp13
  store <4 x i32> %tmp3, <4 x i32> addrspace(5)* %tmp4, align 16
  store <4 x i32> %tmp7, <4 x i32> addrspace(5)* %tmp8, align 16
  store <4 x i32> %tmp11, <4 x i32> addrspace(5)* %tmp12, align 16
  store <4 x i32> %tmp15, <4 x i32> addrspace(5)* %tmp16, align 16
  ret void
}

define amdgpu_kernel void @vector_clause_indirect(i64 addrspace(1)* noalias nocapture readonly %arg, <4 x i32> addrspace(1)* noalias nocapture readnone %arg1, <4 x i32> addrspace(1)* noalias nocapture %arg2) {
; GCN-LABEL: vector_clause_indirect:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_load_dwordx2 s[2:3], s[0:1], 0x24
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
; GCN-NEXT:    v_mov_b32_e32 v1, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    global_load_dwordx2 v[8:9], v[0:1], s[2:3]
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    global_load_dwordx4 v[0:3], v[8:9], off
; GCN-NEXT:    global_load_dwordx4 v[4:7], v[8:9], off offset:16
; GCN-NEXT:    v_mov_b32_e32 v9, s5
; GCN-NEXT:    v_mov_b32_e32 v8, s4
; GCN-NEXT:    s_waitcnt vmcnt(1)
; GCN-NEXT:    global_store_dwordx4 v[8:9], v[0:3], off
; GCN-NEXT:    s_waitcnt vmcnt(1)
; GCN-NEXT:    global_store_dwordx4 v[8:9], v[4:7], off offset:16
; GCN-NEXT:    s_endpgm
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = zext i32 %tmp to i64
  %tmp4 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 %tmp3
  %tmp5 = bitcast i64 addrspace(1)* %tmp4 to <4 x i32> addrspace(1)* addrspace(1)*
  %tmp6 = load <4 x i32> addrspace(1)*, <4 x i32> addrspace(1)* addrspace(1)* %tmp5, align 8
  %tmp7 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp6, align 16
  %tmp8 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %tmp6, i64 1
  %tmp9 = load <4 x i32>, <4 x i32> addrspace(1)* %tmp8, align 16
  store <4 x i32> %tmp7, <4 x i32> addrspace(1)* %arg2, align 16
  %tmp10 = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(1)* %arg2, i64 1
  store <4 x i32> %tmp9, <4 x i32> addrspace(1)* %tmp10, align 16
  ret void
}

define void @load_global_d16_hi(i16 addrspace(1)* %in, i16 %reg, <2 x i16> addrspace(1)* %out) {
; GCN-LABEL: load_global_d16_hi:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_mov_b32_e32 v5, v2
; GCN-NEXT:    global_load_short_d16_hi v5, v[0:1], off
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    global_load_short_d16_hi v2, v[0:1], off offset:64
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    s_waitcnt vmcnt(1)
; GCN-NEXT:    global_store_dword v[3:4], v5, off
; GCN-NEXT:    s_waitcnt vmcnt(1)
; GCN-NEXT:    global_store_dword v[3:4], v2, off offset:128
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
entry:
  %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i64 32
  %load1 = load i16, i16 addrspace(1)* %in
  %load2 = load i16, i16 addrspace(1)* %gep
  %build0 = insertelement <2 x i16> undef, i16 %reg, i32 0
  %build1 = insertelement <2 x i16> %build0, i16 %load1, i32 1
  store <2 x i16> %build1, <2 x i16> addrspace(1)* %out
  %build2 = insertelement <2 x i16> undef, i16 %reg, i32 0
  %build3 = insertelement <2 x i16> %build2, i16 %load2, i32 1
  %gep2 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 32
  store <2 x i16> %build3, <2 x i16> addrspace(1)* %gep2
  ret void
}

define void @load_global_d16_lo(i16 addrspace(1)* %in, i32 %reg, <2 x i16> addrspace(1)* %out) {
; GCN-LABEL: load_global_d16_lo:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_mov_b32_e32 v5, v2
; GCN-NEXT:    global_load_short_d16 v5, v[0:1], off
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    global_load_short_d16 v2, v[0:1], off offset:64
; GCN-NEXT:    s_nop 0
; GCN-NEXT:    s_waitcnt vmcnt(1)
; GCN-NEXT:    global_store_dword v[3:4], v5, off
; GCN-NEXT:    s_waitcnt vmcnt(1)
; GCN-NEXT:    global_store_dword v[3:4], v2, off offset:128
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    s_setpc_b64 s[30:31]
entry:
  %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i64 32
  %reg.bc1 = bitcast i32 %reg to <2 x i16>
  %reg.bc2 = bitcast i32 %reg to <2 x i16>
  %load1 = load i16, i16 addrspace(1)* %in
  %load2 = load i16, i16 addrspace(1)* %gep
  %build1 = insertelement <2 x i16> %reg.bc1, i16 %load1, i32 0
  %build2 = insertelement <2 x i16> %reg.bc2, i16 %load2, i32 0
  %gep2 = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 32
  store <2 x i16> %build1, <2 x i16> addrspace(1)* %out
  store <2 x i16> %build2, <2 x i16> addrspace(1)* %gep2
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()