; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Check that we properly realign the stack. While 4-byte access is all
; that is ever needed, some transformations rely on the known bits from the alignment of the pointer (e.g.


; 128 byte object
; 4 byte emergency stack slot
; = 144 bytes with padding between them

; GCN-LABEL: {{^}}needs_align16_default_stack_align:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 4, v0
; GCN-DAG: v_lshrrev_b32_e64 [[FRAMEDIFF:v[0-9]+]], 6, s32
; GCN: v_add_u32_e32 [[FI:v[0-9]+]], vcc, [[FRAMEDIFF]], [[SCALED_IDX]]

; GCN-NOT: s32

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen

; GCN-NOT: s32

; GCN: ; ScratchSize: 144
define void @needs_align16_default_stack_align(i32 %idx) #0 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 16
  ret void
}

; GCN-LABEL: {{^}}needs_align16_stack_align4:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0x3c0{{$}}
; GCN: s_and_b32 s33, [[SCRATCH_REG]], 0xfffffc00

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: s_add_u32 s32, s32, 0x2800{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen

; GCN: s_sub_u32 s32, s32, 0x2800

; GCN: ; ScratchSize: 160
define void @needs_align16_stack_align4(i32 %idx) #2 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 16
  ret void
}

; GCN-LABEL: {{^}}needs_align32:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0x7c0{{$}}
; GCN: s_and_b32 s33, [[SCRATCH_REG]], 0xfffff800

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: s_add_u32 s32, s32, 0x3000{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen

; GCN: s_sub_u32 s32, s32, 0x3000

; GCN: ; ScratchSize: 192
define void @needs_align32(i32 %idx) #0 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 32, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 32
  ret void
}

; GCN-LABEL: {{^}}force_realign4:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0xc0{{$}}
; GCN: s_and_b32 s33, [[SCRATCH_REG]], 0xffffff00
; GCN: s_add_u32 s32, s32, 0xd00{{$}}

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: s_sub_u32 s32, s32, 0xd00

; GCN: ; ScratchSize: 52
define void @force_realign4(i32 %idx) #1 {
  %alloca.align16 = alloca [8 x i32], align 4, addrspace(5)
  %gep0 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile i32 3, i32 addrspace(5)* %gep0, align 4
  ret void
}
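
; A note on the realignment arithmetic checked above (a sketch, assuming
; wave64 scratch swizzling on fiji): a per-lane alignment of A bytes
; corresponds to a per-wave alignment of A * 64 bytes, so the prologue
; rounds the incoming SP up with
;   s_add_u32 tmp, s32, A * 64 - 64
;   s_and_b32 s33, tmp, ~(A * 64 - 1)
; where adding A * 64 - 64 rather than A * 64 - 1 presumably relies on s32
; always staying wave (64-byte) aligned. For example, A = 16 gives the
; 0x3c0 / 0xfffffc00 pair above, A = 32 gives 0x7c0 / 0xfffff800, and
; A = 4 (force_realign4) gives 0xc0 / 0xffffff00.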

; GCN-LABEL: {{^}}kernel_call_align16_from_8:
; GCN: s_movk_i32 s32, 0x400{{$}}
; GCN-NOT: s32
; GCN: s_swappc_b64
define amdgpu_kernel void @kernel_call_align16_from_8() #0 {
  %alloca = alloca i32, align 4, addrspace(5)
  store volatile i32 2, i32 addrspace(5)* %alloca
  call void @needs_align16_default_stack_align(i32 1)
  ret void
}

; The call sequence should keep the stack aligned to 4 across the call.
; GCN-LABEL: {{^}}kernel_call_align16_from_5:
; GCN: s_movk_i32 s32, 0x400
; GCN: s_swappc_b64
define amdgpu_kernel void @kernel_call_align16_from_5() {
  %alloca0 = alloca i8, align 1, addrspace(5)
  store volatile i8 2, i8 addrspace(5)* %alloca0

  call void @needs_align16_default_stack_align(i32 1)
  ret void
}

; GCN-LABEL: {{^}}kernel_call_align4_from_5:
; GCN: s_movk_i32 s32, 0x400
; GCN: s_swappc_b64
define amdgpu_kernel void @kernel_call_align4_from_5() {
  %alloca0 = alloca i8, align 1, addrspace(5)
  store volatile i8 2, i8 addrspace(5)* %alloca0

  call void @needs_align16_stack_align4(i32 1)
  ret void
}

; GCN-LABEL: {{^}}default_realign_align128:
; GCN: s_add_u32 [[TMP:s[0-9]+]], s32, 0x1fc0
; GCN-NEXT: s_mov_b32 [[FP_COPY:s[0-9]+]], s33
; GCN-NEXT: s_and_b32 s33, [[TMP]], 0xffffe000
; GCN-NEXT: s_add_u32 s32, s32, 0x4000
; GCN-NOT: s33
; GCN: buffer_store_dword v0, off, s[0:3], s33{{$}}
; GCN: s_sub_u32 s32, s32, 0x4000
; GCN: s_mov_b32 s33, [[FP_COPY]]
define void @default_realign_align128(i32 %idx) #0 {
  %alloca.align = alloca i32, align 128, addrspace(5)
  store volatile i32 9, i32 addrspace(5)* %alloca.align, align 128
  ret void
}

; GCN-LABEL: {{^}}disable_realign_align128:
; GCN-NOT: s32
; GCN: buffer_store_dword v0, off, s[0:3], s32{{$}}
; GCN-NOT: s32
define void @disable_realign_align128(i32 %idx) #3 {
  %alloca.align = alloca i32, align 128, addrspace(5)
  store volatile i32 9, i32 addrspace(5)* %alloca.align, align 128
  ret void
}

attributes #0 = { noinline nounwind }
attributes #1 = { noinline nounwind "stackrealign" }
attributes #2 = { noinline nounwind alignstack=4 }
attributes #3 = { noinline nounwind "no-realign-stack" }
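
; A summary of how the attribute groups drive the behaviors exercised
; above (based on the checks; see LangRef for precise semantics):
; #1 "stackrealign" forces realignment even though the align-4 alloca
; needs no extra work; #2 alignstack=4 lowers the assumed incoming stack
; alignment, so the align-16 alloca must be realigned dynamically;
; #3 "no-realign-stack" suppresses realignment entirely, so the align-128
; alloca is addressed directly off s32.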