; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs | FileCheck -check-prefix=MUBUF %s
; RUN: llc < %s -march=amdgcn -mcpu=gfx1010 -amdgpu-enable-flat-scratch -verify-machineinstrs | FileCheck -check-prefix=FLATSCR %s

; FIXME: The MUBUF loads in this test output are incorrect: their SOffset
; should use the frame offset register, not the ABI stack pointer register. We
; rely on the frame index argument of MUBUF stack accesses to survive until PEI
; so we can fix up the SOffset to use the correct frame register in
; eliminateFrameIndex. Passes such as LocalStackSlotAllocation can lift the
; frame index up into something (e.g. `v_add_nc_u32`) that we cannot fold back
; into the MUBUF instruction, so we end up emitting an incorrect offset.
; Fixing this may involve adding stack access pseudos so that we don't have to
; speculatively refer to the ABI stack pointer register at all.

; An assert was hit when the frame offset register was used to address a FrameIndex.
define amdgpu_kernel void @kernel_background_evaluate(float addrspace(5)* %kg, <4 x i32> addrspace(1)* %input, <4 x float> addrspace(1)* %output, i32 %i) {
; MUBUF-LABEL: kernel_background_evaluate:
; MUBUF:       ; %bb.0: ; %entry
; MUBUF-NEXT:    s_load_dword s0, s[0:1], 0x24
; MUBUF-NEXT:    s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; MUBUF-NEXT:    s_mov_b32 s37, SCRATCH_RSRC_DWORD1
; MUBUF-NEXT:    s_mov_b32 s38, -1
; MUBUF-NEXT:    s_mov_b32 s39, 0x31c16000
; MUBUF-NEXT:    s_add_u32 s36, s36, s3
; MUBUF-NEXT:    s_addc_u32 s37, s37, 0
; MUBUF-NEXT:    v_mov_b32_e32 v1, 0x2000
; MUBUF-NEXT:    v_mov_b32_e32 v2, 0x4000
; MUBUF-NEXT:    v_mov_b32_e32 v3, 0
; MUBUF-NEXT:    v_mov_b32_e32 v4, 0x400000
; MUBUF-NEXT:    s_mov_b32 s32, 0xc0000
; MUBUF-NEXT:    v_add_nc_u32_e64 v40, 4, 0x4000
; MUBUF-NEXT:    ; implicit-def: $vcc_hi
; MUBUF-NEXT:    s_getpc_b64 s[4:5]
; MUBUF-NEXT:    s_add_u32 s4, s4, svm_eval_nodes@rel32@lo+4
; MUBUF-NEXT:    s_addc_u32 s5, s5, svm_eval_nodes@rel32@hi+12
; MUBUF-NEXT:    s_waitcnt lgkmcnt(0)
; MUBUF-NEXT:    v_mov_b32_e32 v0, s0
; MUBUF-NEXT:    s_mov_b64 s[0:1], s[36:37]
; MUBUF-NEXT:    s_mov_b64 s[2:3], s[38:39]
; MUBUF-NEXT:    s_swappc_b64 s[30:31], s[4:5]
; MUBUF-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
; MUBUF-NEXT:    s_and_saveexec_b32 s0, vcc_lo
; MUBUF-NEXT:    s_cbranch_execz BB0_2
; MUBUF-NEXT:  ; %bb.1: ; %if.then4.i
; MUBUF-NEXT:    s_clause 0x1
; MUBUF-NEXT:    buffer_load_dword v0, v40, s[36:39], 0 offen
; MUBUF-NEXT:    buffer_load_dword v1, v40, s[36:39], 0 offen offset:4
; MUBUF-NEXT:    s_waitcnt vmcnt(0)
; MUBUF-NEXT:    v_add_nc_u32_e32 v0, v1, v0
; MUBUF-NEXT:    v_mul_lo_u32 v0, 0x41c64e6d, v0
; MUBUF-NEXT:    v_add_nc_u32_e32 v0, 0x3039, v0
; MUBUF-NEXT:    buffer_store_dword v0, v0, s[36:39], 0 offen
; MUBUF-NEXT:  BB0_2: ; %shader_eval_surface.exit
; MUBUF-NEXT:    s_endpgm
;
; FLATSCR-LABEL: kernel_background_evaluate:
; FLATSCR:       ; %bb.0: ; %entry
; FLATSCR-NEXT:    s_add_u32 s2, s2, s5
; FLATSCR-NEXT:    s_movk_i32 s32, 0x6000
; FLATSCR-NEXT:    s_addc_u32 s3, s3, 0
; FLATSCR-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s2
; FLATSCR-NEXT:    s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s3
; FLATSCR-NEXT:    s_load_dword s0, s[0:1], 0x24
; FLATSCR-NEXT:    s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; FLATSCR-NEXT:    s_mov_b32 s37, SCRATCH_RSRC_DWORD1
; FLATSCR-NEXT:    s_mov_b32 s38, -1
; FLATSCR-NEXT:    s_mov_b32 s39, 0x31c16000
; FLATSCR-NEXT:    s_add_u32 s36, s36, s5
; FLATSCR-NEXT:    s_addc_u32 s37, s37, 0
; FLATSCR-NEXT:    v_mov_b32_e32 v1, 0x2000
; FLATSCR-NEXT:    v_mov_b32_e32 v2, 0x4000
; FLATSCR-NEXT:    v_mov_b32_e32 v3, 0
; FLATSCR-NEXT:    v_mov_b32_e32 v4, 0x400000
; FLATSCR-NEXT:    ; implicit-def: $vcc_hi
; FLATSCR-NEXT:    s_getpc_b64 s[4:5]
; FLATSCR-NEXT:    s_add_u32 s4, s4, svm_eval_nodes@rel32@lo+4
; FLATSCR-NEXT:    s_addc_u32 s5, s5, svm_eval_nodes@rel32@hi+12
; FLATSCR-NEXT:    s_waitcnt lgkmcnt(0)
; FLATSCR-NEXT:    v_mov_b32_e32 v0, s0
; FLATSCR-NEXT:    s_mov_b64 s[0:1], s[36:37]
; FLATSCR-NEXT:    s_mov_b64 s[2:3], s[38:39]
; FLATSCR-NEXT:    s_swappc_b64 s[30:31], s[4:5]
; FLATSCR-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
; FLATSCR-NEXT:    s_and_saveexec_b32 s0, vcc_lo
; FLATSCR-NEXT:    s_cbranch_execz BB0_2
; FLATSCR-NEXT:  ; %bb.1: ; %if.then4.i
; FLATSCR-NEXT:    s_movk_i32 vcc_lo, 0x4000
; FLATSCR-NEXT:    s_nop 1
; FLATSCR-NEXT:    scratch_load_dword v0, off, vcc_lo offset:4
; FLATSCR-NEXT:    s_waitcnt_depctr 0xffe3
; FLATSCR-NEXT:    s_movk_i32 vcc_lo, 0x4000
; FLATSCR-NEXT:    scratch_load_dword v1, off, vcc_lo offset:8
; FLATSCR-NEXT:    s_waitcnt vmcnt(0)
; FLATSCR-NEXT:    v_add_nc_u32_e32 v0, v1, v0
; FLATSCR-NEXT:    v_mul_lo_u32 v0, 0x41c64e6d, v0
; FLATSCR-NEXT:    v_add_nc_u32_e32 v0, 0x3039, v0
; FLATSCR-NEXT:    scratch_store_dword off, v0, s0
; FLATSCR-NEXT:  BB0_2: ; %shader_eval_surface.exit
; FLATSCR-NEXT:    s_endpgm
entry:
  %sd = alloca <1339 x i32>, align 8192, addrspace(5)
  %state = alloca <4 x i32>, align 16, addrspace(5)
  %rslt = call i32 @svm_eval_nodes(float addrspace(5)* %kg, <1339 x i32> addrspace(5)* %sd, <4 x i32> addrspace(5)* %state, i32 0, i32 4194304)
  %cmp = icmp eq i32 %rslt, 0
  br i1 %cmp, label %shader_eval_surface.exit, label %if.then4.i

if.then4.i:                                       ; preds = %entry
  %rng_hash.i.i = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %state, i32 0, i32 1
  %tmp0 = load i32, i32 addrspace(5)* %rng_hash.i.i, align 4
  %rng_offset.i.i = getelementptr inbounds <4 x i32>, <4 x i32> addrspace(5)* %state, i32 0, i32 2
  %tmp1 = load i32, i32 addrspace(5)* %rng_offset.i.i, align 4
  %add.i.i = add i32 %tmp1, %tmp0
  %add1.i.i = add i32 %add.i.i, 0
  %mul.i.i.i.i = mul i32 %add1.i.i, 1103515245
  %add.i.i.i.i = add i32 %mul.i.i.i.i, 12345
  store i32 %add.i.i.i.i, i32 addrspace(5)* undef, align 16
  br label %shader_eval_surface.exit

shader_eval_surface.exit:                         ; preds = %entry
  ret void
}

declare hidden i32 @svm_eval_nodes(float addrspace(5)*, <1339 x i32> addrspace(5)*, <4 x i32> addrspace(5)*, i32, i32) local_unnamed_addr