; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=1 -amdgpu-spill-sgpr-to-smem=0 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVGPR -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -march=amdgcn -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-spill-sgpr-to-smem=0 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOVMEM -check-prefix=GCN %s
; RUN: llc -O0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-spill-sgpr-to-smem=1 -march=amdgcn -mcpu=tonga -mattr=+vgpr-spilling -verify-machineinstrs < %s | FileCheck -check-prefix=TOSMEM -check-prefix=GCN %s

; Tests spilling and restoring of the m0 special register across branches.
; The RUN lines select three SGPR spill strategies, each with its own check
; prefix (per the -amdgpu-spill-sgpr-to-* flags above):
;   TOVGPR - copy m0 to an SGPR, then spill it into a VGPR lane with
;            v_writelane_b32 / v_readlane_b32.
;   TOVMEM - copy m0 to an SGPR, move it into a VGPR, and spill that VGPR
;            to scratch memory with buffer_store/load_dword.
;   TOSMEM - spill the SGPR copy directly to scratch with scalar
;            s_buffer_store/load instructions, which themselves consume m0
;            as the offset register.

; XXX - Why does it like to use vcc?

; m0 is defined by inline asm in the entry block, spilled across the branch,
; and reloaded back into m0 before the s_add_i32 use in the endif block.
; GCN-LABEL: {{^}}spill_m0:
; TOSMEM: s_mov_b32 s[[LO:[0-9]+]], SCRATCH_RSRC_DWORD0
; TOSMEM: s_mov_b32 s[[HI:[0-9]+]], 0xe80000

; GCN-DAG: s_cmp_lg_u32

; TOVGPR-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOVGPR: v_writelane_b32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]], 0

; TOVMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOVMEM-DAG: v_mov_b32_e32 [[SPILL_VREG:v[0-9]+]], [[M0_COPY]]
; TOVMEM: buffer_store_dword [[SPILL_VREG]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} ; 4-byte Folded Spill
; TOVMEM: s_waitcnt vmcnt(0)

; TOSMEM-DAG: s_mov_b32 [[M0_COPY:s[0-9]+]], m0
; TOSMEM: s_mov_b32 m0, s3{{$}}
; TOSMEM-NOT: [[M0_COPY]]
; TOSMEM: s_buffer_store_dword [[M0_COPY]], s{{\[}}[[LO]]:[[HI]]], m0 ; 4-byte Folded Spill
; TOSMEM: s_waitcnt lgkmcnt(0)

; GCN: s_cbranch_scc1 [[ENDIF:BB[0-9]+_[0-9]+]]

; GCN: [[ENDIF]]:
; TOVGPR: v_readlane_b32 [[M0_RESTORE:s[0-9]+]], [[SPILL_VREG]], 0
; TOVGPR: s_mov_b32 m0, [[M0_RESTORE]]

; TOVMEM: buffer_load_dword [[RELOAD_VREG:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} ; 4-byte Folded Reload
; TOVMEM: s_waitcnt vmcnt(0)
; TOVMEM: v_readfirstlane_b32 [[M0_RESTORE:s[0-9]+]], [[RELOAD_VREG]]
; TOVMEM: s_mov_b32 m0, [[M0_RESTORE]]

; TOSMEM: s_mov_b32 m0, s3{{$}}
; TOSMEM: s_buffer_load_dword [[M0_RESTORE:s[0-9]+]], s{{\[}}[[LO]]:[[HI]]], m0 ; 4-byte Folded Reload
; TOSMEM-NOT: [[M0_RESTORE]]
; TOSMEM: s_mov_b32 m0, [[M0_RESTORE]]

; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
define void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
entry:
  ; Force a value into m0; it must survive to the use in %endif.
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
  %cmp0 = icmp eq i32 %cond, 0
  br i1 %cmp0, label %if, label %endif

if:
  call void asm sideeffect "v_nop", ""() #0
  br label %endif

endif:
  ; Reads m0 (constraint {M0}), so the spilled value must be restored here.
  %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{M0}"(i32 %m0) #0
  store i32 %foo, i32 addrspace(1)* %out
  ret void
}

@lds = internal addrspace(3) global [64 x float] undef

; m0 is killed, so it isn't necessary during the entry block spill to preserve it
; GCN-LABEL: {{^}}spill_kill_m0_lds:
; GCN: s_mov_b32 m0, s6
; GCN: v_interp_mov_f32

; TOSMEM-NOT: s_m0
; TOSMEM: s_mov_b32 m0, s7
; TOSMEM-NEXT: s_buffer_store_dword s{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 4-byte Folded Spill
; TOSMEM-NOT: m0

; TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s7, 0x100
; TOSMEM: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
; TOSMEM-NOT: m0

; TOSMEM: s_mov_b64 exec,
; TOSMEM: s_cbranch_execz
; TOSMEM: s_branch

; TOSMEM: BB{{[0-9]+_[0-9]+}}:
; TOSMEM-NEXT: s_add_u32 m0, s7, 0x100
; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload


; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
define amdgpu_ps void @spill_kill_m0_lds(<16 x i8> addrspace(2)* inreg %arg, <16 x i8> addrspace(2)* inreg %arg1, <32 x i8> addrspace(2)* inreg %arg2, i32 inreg %arg3) #0 {
main_body:
  %tmp = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %arg3)
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  ; LDS access requires m0 to be set (to -1 for the full LDS range).
  %lds_ptr = getelementptr [64 x float], [64 x float] addrspace(3)* @lds, i32 0, i32 0
  %lds_data = load float, float addrspace(3)* %lds_ptr
  br label %endif

else:                                             ; preds = %main_body
  %interp = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %arg3)
  br label %endif

endif:                                            ; preds = %else, %if
  %export = phi float [ %lds_data, %if ], [ %interp, %else ]
  %tmp4 = call i32 @llvm.SI.packf16(float %export, float %export)
  %tmp5 = bitcast i32 %tmp4 to float
  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp5, float %tmp5, float %tmp5, float %tmp5)
  ret void
}

; Force save and restore of m0 during SMEM spill
; (m0 is live here because the inline asm defines it, so the SMEM spill
; sequence must stash it in vcc_hi around its own use of m0.)
; GCN-LABEL: {{^}}m0_unavailable_spill:

; GCN: ; def m0, 1

; GCN: s_mov_b32 m0, s2
; GCN: v_interp_mov_f32

; GCN: ; clobber m0

; TOSMEM: s_mov_b32 vcc_hi, m0
; TOSMEM: s_mov_b32 m0, s3
; TOSMEM-NEXT: s_buffer_store_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Spill
; TOSMEM: s_mov_b32 m0, vcc_hi

; TOSMEM: s_mov_b64 exec,
; TOSMEM: s_cbranch_execz
; TOSMEM: s_branch

; TOSMEM: BB{{[0-9]+_[0-9]+}}:
; TOSMEM-NEXT: s_mov_b32 m0, s3
; TOSMEM-NEXT: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, m0 ; 8-byte Folded Reload

; GCN-NOT: v_readlane_b32 m0
; GCN-NOT: s_buffer_store_dword m0
; GCN-NOT: s_buffer_load_dword m0
define void @m0_unavailable_spill(i32 %arg3) #0 {
main_body:
  ; Define m0 via inline asm so it is live across the spill below.
  %m0 = call i32 asm sideeffect "; def $0, 1", "={M0}"() #0
  %tmp = call float @llvm.SI.fs.constant(i32 0, i32 0, i32 %arg3)
  call void asm sideeffect "; clobber $0", "~{M0}"() #0
  %cmp = fcmp ueq float 0.000000e+00, %tmp
  br i1 %cmp, label %if, label %else

if:                                               ; preds = %main_body
  store volatile i32 8, i32 addrspace(1)* undef
  br label %endif

else:                                             ; preds = %main_body
  store volatile i32 11, i32 addrspace(1)* undef
  br label %endif

endif:
  ret void
}

; m0 is reloaded from the spill slot in %bb even though the block also sets
; m0 to -1 for the LDS write; the SMEM reload again saves/restores m0 in
; vcc_hi around its own m0 use.
; GCN-LABEL: {{^}}restore_m0_lds:
; TOSMEM: s_load_dwordx2 [[REG:s\[[0-9]+:[0-9]+\]]]
; TOSMEM: s_cmp_eq_u32
; TOSMEM-NOT: m0
; TOSMEM: s_mov_b32 m0, s3
; TOSMEM: s_buffer_store_dwordx2 [[REG]], s[88:91], m0 ; 8-byte Folded Spill
; TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s3, 0x200
; TOSMEM: s_buffer_store_dword s{{[0-9]+}}, s[88:91], m0 ; 4-byte Folded Spill
; TOSMEM-NOT: m0
; TOSMEM: s_cbranch_scc1

; TOSMEM: s_mov_b32 m0, -1

; TOSMEM: s_mov_b32 vcc_hi, m0
; TOSMEM: s_mov_b32 m0, s3
; TOSMEM: s_buffer_load_dwordx2 s{{\[[0-9]+:[0-9]+\]}}, s[88:91], m0 ; 8-byte Folded Reload
; TOSMEM: s_mov_b32 m0, vcc_hi
; TOSMEM: s_waitcnt lgkmcnt(0)

; TOSMEM: ds_write_b64

; TOSMEM-NOT: m0
; TOSMEM: s_add_u32 m0, s3, 0x200
; TOSMEM: s_buffer_load_dword s0, s[88:91], m0 ; 4-byte Folded Reload
; TOSMEM-NOT: m0
; TOSMEM: s_waitcnt lgkmcnt(0)
; TOSMEM-NOT: m0
; TOSMEM: s_mov_b32 m0, s0
; TOSMEM: ; use m0

; TOSMEM: s_dcache_wb
; TOSMEM: s_endpgm
define void @restore_m0_lds(i32 %arg) {
  %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
  %sval = load volatile i64, i64 addrspace(2)* undef
  %cmp = icmp eq i32 %arg, 0
  br i1 %cmp, label %ret, label %bb

bb:
  ; LDS store forces m0 to be set, clobbering the spilled value's home reg.
  store volatile i64 %sval, i64 addrspace(3)* undef
  call void asm sideeffect "; use $0", "{M0}"(i32 %m0) #0
  br label %ret

ret:
  ret void
}

declare float @llvm.SI.fs.constant(i32, i32, i32) readnone

declare i32 @llvm.SI.packf16(float, float) readnone

declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)

attributes #0 = { nounwind }