; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Check that we properly realign the stack. While 4-byte access is all
; that is ever needed, some transformations rely on the known bits from the alignment of the pointer (e.g.
; folding the known-zero low bits of an aligned frame pointer when selecting scratch addressing modes).


; 128 byte object
; 4 byte emergency stack slot
; = 144 bytes with padding between them

; GCN-LABEL: {{^}}needs_align16_default_stack_align:
; GCN: s_sub_u32 [[SUB:s[0-9]+]], s32, s4
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 4, v0
; GCN-DAG: v_lshrrev_b32_e64 [[FRAMEDIFF:v[0-9]+]], 6, [[SUB]]
; GCN: v_add_u32_e32 [[FI:v[0-9]+]], vcc, [[FRAMEDIFF]], [[SCALED_IDX]]

; GCN-NOT: s32

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen

; GCN-NOT: s32

; GCN: ; ScratchSize: 144
; Dynamically indexed volatile store into a 128-byte, 16-byte-aligned alloca.
; The alloca's align 16 matches the function's default stack alignment, so no
; extra realignment code is needed.
define void @needs_align16_default_stack_align(i32 %idx) #0 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 16
  ret void
}

; GCN-LABEL: {{^}}needs_align16_stack_align4:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0x3c0{{$}}
; GCN: s_and_b32 s5, s6, 0xfffffc00
; GCN: s_add_u32 s32, s32, 0x2800{{$}}

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen

; GCN: s_sub_u32 s32, s32, 0x2800

; GCN: ; ScratchSize: 160
; Same body as @needs_align16_default_stack_align, but attribute group #2
; (alignstack=4) lowers the assumed incoming stack alignment, forcing explicit
; realignment code for the align 16 alloca.
define void @needs_align16_stack_align4(i32 %idx) #2 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 16
  ret void
}

; GCN-LABEL: {{^}}needs_align32:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0x7c0{{$}}
; GCN: s_and_b32 s5, s6, 0xfffff800
; GCN: s_add_u32 s32, s32, 0x3000{{$}}

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen

; GCN: s_sub_u32 s32, s32, 0x3000

; GCN: ; ScratchSize: 192
; An align 32 alloca exceeds the default stack alignment, so realignment code
; must be emitted even with the default function attributes (#0).
define void @needs_align32(i32 %idx) #0 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 32, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 32
  ret void
}

; GCN-LABEL: {{^}}force_realign4:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0xc0{{$}}
; GCN: s_and_b32 s5, s6, 0xffffff00
; GCN: s_add_u32 s32, s32, 0xd00{{$}}

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s4 offen
; GCN: s_sub_u32 s32, s32, 0xd00

; GCN: ; ScratchSize: 52
; Attribute group #1 carries "stackrealign", which forces realignment code even
; though the alloca only requires 4-byte alignment.
define void @force_realign4(i32 %idx) #1 {
  %alloca.align16 = alloca [8 x i32], align 4, addrspace(5)
  %gep0 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile i32 3, i32 addrspace(5)* %gep0, align 4
  ret void
}

; GCN-LABEL: {{^}}kernel_call_align16_from_8:
; GCN: s_add_u32 s32, s8, 0x400{{$}}
; GCN-NOT: s32
; GCN: s_swappc_b64
; Kernel with a small local alloca that calls a function needing 16-byte stack
; alignment; the kernel only sets up s32 once before the call.
define amdgpu_kernel void @kernel_call_align16_from_8() #0 {
  %alloca = alloca i32, align 4, addrspace(5)
  store volatile i32 2, i32 addrspace(5)* %alloca
  call void @needs_align16_default_stack_align(i32 1)
  ret void
}

; The call sequence should keep the stack on call aligned to 4
; GCN-LABEL: {{^}}kernel_call_align16_from_5:
; GCN: s_add_u32 s32, s8, 0x400
; GCN: s_swappc_b64
; Kernel whose only stack object is a 1-byte, align 1 alloca; the call to the
; align-16 callee must still leave the stack pointer suitably aligned.
define amdgpu_kernel void @kernel_call_align16_from_5() {
  %alloca0 = alloca i8, align 1, addrspace(5)
  store volatile i8 2, i8  addrspace(5)* %alloca0

  call void @needs_align16_default_stack_align(i32 1)
  ret void
}

; GCN-LABEL: {{^}}kernel_call_align4_from_5:
; GCN: s_add_u32 s32, s8, 0x400
; GCN: s_swappc_b64
; Same align 1 alloca as above, but calling the alignstack=4 callee, which does
; its own realignment; the kernel's stack setup is unchanged.
define amdgpu_kernel void @kernel_call_align4_from_5() {
  %alloca0 = alloca i8, align 1, addrspace(5)
  store volatile i8 2, i8  addrspace(5)* %alloca0

  call void @needs_align16_stack_align4(i32 1)
  ret void
}

; GCN-LABEL: {{^}}default_realign_align128:
; GCN: s_add_u32 [[TMP:s[0-9]+]], s32, 0x1fc0
; GCN-NEXT: s_and_b32 s5, [[TMP]], 0xffffe000
; GCN-NEXT: s_add_u32 s32, s32, 0x4000
; GCN-NOT: s5
; GCN: buffer_store_dword v0, off, s[0:3], s5{{$}}
; GCN: s_sub_u32 s32, s32, 0x4000
; An align 128 alloca with default attributes (#0): realignment code is
; expected to round the frame up to a 128-byte boundary.
define void @default_realign_align128(i32 %idx) #0 {
  %alloca.align = alloca i32, align 128, addrspace(5)
  store volatile i32 9, i32 addrspace(5)* %alloca.align, align 128
  ret void
}

; GCN-LABEL: {{^}}disable_realign_align128:
; GCN-NOT: s32
; GCN: buffer_store_dword v0, off, s[0:3], s32{{$}}
; GCN-NOT: s32
; Same align 128 alloca, but attribute group #3 ("no-realign-stack") suppresses
; realignment: no s32 adjustment may be emitted around the store.
define void @disable_realign_align128(i32 %idx) #3 {
  %alloca.align = alloca i32, align 128, addrspace(5)
  store volatile i32 9, i32 addrspace(5)* %alloca.align, align 128
  ret void
}

; #0: baseline; #1: "stackrealign" forces realignment; #2: alignstack=4 lowers
; the assumed stack alignment; #3: "no-realign-stack" disables realignment.
attributes #0 = { noinline nounwind }
attributes #1 = { noinline nounwind "stackrealign" }
attributes #2 = { noinline nounwind alignstack=4 }
attributes #3 = { noinline nounwind "no-realign-stack" }
