; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Check that we properly realign the stack. While 4-byte access is all
; that is ever needed, some transformations rely on the known bits from
; the alignment of the pointer (e.g. that the low bits of a sufficiently
; aligned pointer are known to be zero).

; 128 byte object
; 4 byte emergency stack slot
; = 144 bytes with padding between them
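;
; Throughout these checks s32 is the stack pointer, s33 the frame pointer and
; s34 the base pointer. Their values are swizzled (per-lane bytes scaled by
; the 64-lane wave size), so an s32 increment of 0x400 corresponds to 16
; bytes of per-lane stack.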

; GCN-LABEL: {{^}}needs_align16_default_stack_align:
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 4, v0
; GCN-DAG: v_lshrrev_b32_e64 [[FRAMEDIFF:v[0-9]+]], 6, s32
; GCN: v_add_u32_e32 [[FI:v[0-9]+]], vcc, [[FRAMEDIFF]], [[SCALED_IDX]]

; GCN-NOT: s32

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen

; GCN-NOT: s32

; GCN: ; ScratchSize: 144
define void @needs_align16_default_stack_align(i32 %idx) #0 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 16
  ret void
}

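; Attribute #2 (alignstack=4) limits the assumed stack alignment to 4 bytes,
; so the 16 byte aligned alloca requires dynamic realignment: s33 becomes s32
; rounded up to a 16 byte (0x400 swizzled) boundary, and s32 is bumped by
; 0x2800 (160 bytes, matching the ScratchSize) for the duration of the body.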
; GCN-LABEL: {{^}}needs_align16_stack_align4:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0x3c0{{$}}
; GCN: s_and_b32 s33, [[SCRATCH_REG]], 0xfffffc00

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: s_add_u32 s32, s32, 0x2800{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen

; GCN: s_sub_u32 s32, s32, 0x2800

; GCN: ; ScratchSize: 160
define void @needs_align16_stack_align4(i32 %idx) #2 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 16
  ret void
}

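; A 32 byte aligned alloca with the default stack alignment: s33 is rounded
; up to a 32 byte (0x800 swizzled) boundary and s32 is bumped by 0x3000
; (192 bytes, matching the ScratchSize).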
; GCN-LABEL: {{^}}needs_align32:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0x7c0{{$}}
; GCN: s_and_b32 s33, [[SCRATCH_REG]], 0xfffff800

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: s_add_u32 s32, s32, 0x3000{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen

; GCN: s_sub_u32 s32, s32, 0x3000

; GCN: ; ScratchSize: 192
define void @needs_align32(i32 %idx) #0 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 32, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 32
  ret void
}

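; The "stackrealign" attribute (#1) forces realignment even though the alloca
; only needs 4 byte alignment: s33 is rounded up to a 4 byte (0x100 swizzled)
; boundary and s32 is bumped by 0xd00 (52 bytes, matching the ScratchSize).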
; GCN-LABEL: {{^}}force_realign4:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0xc0{{$}}
; GCN: s_and_b32 s33, [[SCRATCH_REG]], 0xffffff00
; GCN: s_add_u32 s32, s32, 0xd00{{$}}

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], 0 offen
; GCN: s_sub_u32 s32, s32, 0xd00

; GCN: ; ScratchSize: 52
define void @force_realign4(i32 %idx) #1 {
  %alloca.align16 = alloca [8 x i32], align 4, addrspace(5)
  %gep0 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile i32 3, i32 addrspace(5)* %gep0, align 4
  ret void
}

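; A kernel has no incoming stack pointer; s32 is initialized to 0x400
; (16 bytes per lane), past the kernel's own frame, so the callee's default
; 16 byte stack alignment holds at the call, and s32 is not adjusted again
; around the call.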
; GCN-LABEL: {{^}}kernel_call_align16_from_8:
; GCN: s_movk_i32 s32, 0x400{{$}}
; GCN-NOT: s32
; GCN: s_swappc_b64
define amdgpu_kernel void @kernel_call_align16_from_8() #0 {
  %alloca = alloca i32, align 4, addrspace(5)
  store volatile i32 2, i32 addrspace(5)* %alloca
  call void @needs_align16_default_stack_align(i32 1)
  ret void
}

; The call sequence should keep the stack aligned to 4 at the call.
; GCN-LABEL: {{^}}kernel_call_align16_from_5:
; GCN: s_movk_i32 s32, 0x400
; GCN: s_swappc_b64
define amdgpu_kernel void @kernel_call_align16_from_5() {
  %alloca0 = alloca i8, align 1, addrspace(5)
  store volatile i8 2, i8 addrspace(5)* %alloca0

  call void @needs_align16_default_stack_align(i32 1)
  ret void
}

; GCN-LABEL: {{^}}kernel_call_align4_from_5:
; GCN: s_movk_i32 s32, 0x400
; GCN: s_swappc_b64
define amdgpu_kernel void @kernel_call_align4_from_5() {
  %alloca0 = alloca i8, align 1, addrspace(5)
  store volatile i8 2, i8 addrspace(5)* %alloca0

  call void @needs_align16_stack_align4(i32 1)
  ret void
}

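; Default behaviour for a 128 byte aligned alloca: the old frame pointer is
; kept in an SGPR copy, s33 is rounded up to a 128 byte (0x2000 swizzled)
; boundary, and s32 is bumped by 0x4000 (256 bytes) for the frame.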
; GCN-LABEL: {{^}}default_realign_align128:
; GCN: s_add_u32 [[TMP:s[0-9]+]], s32, 0x1fc0
; GCN-NEXT: s_mov_b32 [[FP_COPY:s[0-9]+]], s33
; GCN-NEXT: s_and_b32 s33, [[TMP]], 0xffffe000
; GCN-NEXT: s_add_u32 s32, s32, 0x4000
; GCN-NOT: s33
; GCN: buffer_store_dword v0, off, s[0:3], s33{{$}}
; GCN: s_sub_u32 s32, s32, 0x4000
; GCN: s_mov_b32 s33, [[FP_COPY]]
define void @default_realign_align128(i32 %idx) #0 {
  %alloca.align = alloca i32, align 128, addrspace(5)
  store volatile i32 9, i32 addrspace(5)* %alloca.align, align 128
  ret void
}

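; With "no-realign-stack" (attribute #3) the over-aligned alloca does not
; trigger realignment: the store addresses the frame directly through s32 and
; the stack pointer is never adjusted.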
; GCN-LABEL: {{^}}disable_realign_align128:
; GCN-NOT: s32
; GCN: buffer_store_dword v0, off, s[0:3], s32{{$}}
; GCN-NOT: s32
define void @disable_realign_align128(i32 %idx) #3 {
  %alloca.align = alloca i32, align 128, addrspace(5)
  store volatile i32 9, i32 addrspace(5)* %alloca.align, align 128
  ret void
}

declare void @extern_func(<32 x i32>, i32) #0
define void @func_call_align1024_bp_gets_vgpr_spill(<32 x i32> %a, i32 %b) #0 {
; The test forces the stack to be realigned to a new boundary
; since there is a local object with an alignment of 1024.
; The function should use the BP (s34) to access the incoming stack arguments.
; The BP value is saved/restored with a VGPR spill.

; GCN-LABEL: func_call_align1024_bp_gets_vgpr_spill:
; GCN: buffer_store_dword [[VGPR_REG:v[0-9]+]], off, s[0:3], s32 offset:1028 ; 4-byte Folded Spill
; GCN-NEXT: s_mov_b64 exec, s[4:5]
; GCN-NEXT: v_writelane_b32 [[VGPR_REG]], s33, 2
; GCN-NEXT: v_writelane_b32 [[VGPR_REG]], s34, 3
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0xffc0
; GCN: s_and_b32 s33, [[SCRATCH_REG]], 0xffff0000

; GCN: s_mov_b32 s34, s32
; GCN-NEXT: v_mov_b32_e32 v32, 0

; GCN: buffer_store_dword v32, off, s[0:3], s33 offset:1024
; GCN-NEXT: buffer_load_dword v{{[0-9]+}}, off, s[0:3], s34
; GCN-NEXT: s_add_u32 s32, s32, 0x30000

; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s32
; GCN-NEXT: s_swappc_b64 s[30:31], s[4:5]

; GCN: v_readlane_b32 s33, [[VGPR_REG]], 2
; GCN-NEXT: s_sub_u32 s32, s32, 0x30000
; GCN-NEXT: v_readlane_b32 s34, [[VGPR_REG]], 3
; GCN-NEXT: s_or_saveexec_b64 s[6:7], -1
; GCN-NEXT: buffer_load_dword [[VGPR_REG]], off, s[0:3], s32 offset:1028 ; 4-byte Folded Reload
; GCN-NEXT: s_mov_b64 exec, s[6:7]
  %temp = alloca i32, align 1024, addrspace(5)
  store volatile i32 0, i32 addrspace(5)* %temp, align 1024
  call void @extern_func(<32 x i32> %a, i32 %b)
  ret void
}

%struct.Data = type { [9 x i32] }
define i32 @needs_align1024_stack_args_used_inside_loop(%struct.Data addrspace(5)* nocapture readonly byval(%struct.Data) align 8 %arg) local_unnamed_addr #4 {
; The local object allocation needs an alignment of 1024.
; Since the function argument is accessed in a loop with an
; index variable, the base pointer is first loaded into a VGPR
; and that value is then used to load the incoming values.
; The BP value is saved/restored in an SGPR in the prologue/epilogue.

; GCN-LABEL: needs_align1024_stack_args_used_inside_loop:
; GCN: s_mov_b32 [[BP_COPY:s[0-9]+]], s34
; GCN-NEXT: s_mov_b32 s34, s32
; GCN-NEXT: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0xffc0
; GCN-NEXT: s_mov_b32 [[FP_COPY:s[0-9]+]], s33
; GCN-NEXT: s_and_b32 s33, [[SCRATCH_REG]], 0xffff0000
; GCN-NEXT: v_mov_b32_e32 v{{[0-9]+}}, 0
; GCN-NEXT: v_lshrrev_b32_e64 [[VGPR_REG:v[0-9]+]], 6, s34
; GCN: s_add_u32 s32, s32, 0x30000
; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s33 offset:1024
; GCN: buffer_load_dword v{{[0-9]+}}, [[VGPR_REG]], s[0:3], 0 offen
; GCN: v_add_u32_e32 [[VGPR_REG]], vcc, 4, [[VGPR_REG]]
; GCN: s_sub_u32 s32, s32, 0x30000
; GCN-NEXT: s_mov_b32 s33, [[FP_COPY]]
; GCN-NEXT: s_mov_b32 s34, [[BP_COPY]]
; GCN-NEXT: s_setpc_b64 s[30:31]
begin:
  %local_var = alloca i32, align 1024, addrspace(5)
  store volatile i32 0, i32 addrspace(5)* %local_var, align 1024
  br label %loop_body

loop_end:                                                ; preds = %loop_body
  %idx_next = add nuw nsw i32 %lp_idx, 1
  %lp_exit_cond = icmp eq i32 %idx_next, 9
  br i1 %lp_exit_cond, label %exit, label %loop_body

loop_body:                                                ; preds = %loop_end, %begin
  %lp_idx = phi i32 [ 0, %begin ], [ %idx_next, %loop_end ]
  %ptr = getelementptr inbounds %struct.Data, %struct.Data addrspace(5)* %arg, i32 0, i32 0, i32 %lp_idx
  %val = load i32, i32 addrspace(5)* %ptr, align 8
  %lp_cond = icmp eq i32 %val, %lp_idx
  br i1 %lp_cond, label %loop_end, label %exit

exit:                                               ; preds = %loop_end, %loop_body
  %out = phi i32 [ 0, %loop_body ], [ 1, %loop_end ]
  ret i32 %out
}

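; The inline asm below clobbers every SGPR that could otherwise hold the
; saved base pointer, so s34 is instead spilled to and restored from a VGPR
; lane (v_writelane_b32/v_readlane_b32).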
define void @no_free_scratch_sgpr_for_bp_copy(<32 x i32> %a, i32 %b) #0 {
; GCN-LABEL: no_free_scratch_sgpr_for_bp_copy:
; GCN: ; %bb.0:
; GCN: v_writelane_b32 [[VGPR_REG:v[0-9]+]], s34, 0
; GCN-NEXT: s_mov_b32 s34, s32
; GCN-NEXT: buffer_load_dword v{{[0-9]+}}, off, s[0:3], s34
; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s33 offset:128
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: ;;#ASMEND
; GCN: v_readlane_b32 s34, [[VGPR_REG:v[0-9]+]], 0
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
  %local_val = alloca i32, align 128, addrspace(5)
  store volatile i32 %b, i32 addrspace(5)* %local_val, align 128
  ; Use all clobberable registers, so BP has to spill to a VGPR.
  call void asm sideeffect "",
    "~{s0},~{s1},~{s2},~{s3},~{s4},~{s5},~{s6},~{s7},~{s8},~{s9}
    ,~{s10},~{s11},~{s12},~{s13},~{s14},~{s15},~{s16},~{s17},~{s18},~{s19}
    ,~{s20},~{s21},~{s22},~{s23},~{s24},~{s25},~{s26},~{s27},~{s28},~{s29}
    ,~{vcc_hi}"() #0
  ret void
}

attributes #0 = { noinline nounwind }
attributes #1 = { noinline nounwind "stackrealign" }
attributes #2 = { noinline nounwind alignstack=4 }
attributes #3 = { noinline nounwind "no-realign-stack" }
attributes #4 = { noinline nounwind "frame-pointer"="all" }