; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=CI %s
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=GFX9 %s

declare i32 @llvm.amdgcn.workitem.id.x() #0

@lds.obj = addrspace(3) global [256 x i32] undef, align 4

9define amdgpu_kernel void @write_ds_sub0_offset0_global() #0 {
10; CI-LABEL: write_ds_sub0_offset0_global:
11; CI:       ; %bb.0: ; %entry
12; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
13; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
14; CI-NEXT:    v_mov_b32_e32 v1, 0x7b
15; CI-NEXT:    s_mov_b32 m0, -1
16; CI-NEXT:    ds_write_b32 v0, v1 offset:12
17; CI-NEXT:    s_endpgm
18;
19; GFX9-LABEL: write_ds_sub0_offset0_global:
20; GFX9:       ; %bb.0: ; %entry
21; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
22; GFX9-NEXT:    v_sub_u32_e32 v0, 0, v0
23; GFX9-NEXT:    v_mov_b32_e32 v1, 0x7b
24; GFX9-NEXT:    ds_write_b32 v0, v1 offset:12
25; GFX9-NEXT:    s_endpgm
26entry:
27  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #1
28  %sub1 = sub i32 0, %x.i
29  %tmp0 = getelementptr [256 x i32], [256 x i32] addrspace(3)* @lds.obj, i32 0, i32 %sub1
30  %arrayidx = getelementptr inbounds i32, i32 addrspace(3)* %tmp0, i32 3
31  store i32 123, i32 addrspace(3)* %arrayidx
32  ret void
33}
34
35define amdgpu_kernel void @write_ds_sub0_offset0_global_clamp_bit(float %dummy.val) #0 {
36; CI-LABEL: write_ds_sub0_offset0_global_clamp_bit:
37; CI:       ; %bb.0: ; %entry
38; CI-NEXT:    s_load_dword s0, s[0:1], 0x9
39; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
40; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
41; CI-NEXT:    s_mov_b64 vcc, 0
42; CI-NEXT:    v_mov_b32_e32 v2, 0x7b
43; CI-NEXT:    s_waitcnt lgkmcnt(0)
44; CI-NEXT:    v_mov_b32_e32 v1, s0
45; CI-NEXT:    s_mov_b32 s0, 0
46; CI-NEXT:    v_div_fmas_f32 v1, v1, v1, v1
47; CI-NEXT:    s_mov_b32 m0, -1
48; CI-NEXT:    s_mov_b32 s3, 0xf000
49; CI-NEXT:    s_mov_b32 s2, -1
50; CI-NEXT:    s_mov_b32 s1, s0
51; CI-NEXT:    ds_write_b32 v0, v2 offset:12
52; CI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
53; CI-NEXT:    s_waitcnt vmcnt(0)
54; CI-NEXT:    s_endpgm
55;
56; GFX9-LABEL: write_ds_sub0_offset0_global_clamp_bit:
57; GFX9:       ; %bb.0: ; %entry
58; GFX9-NEXT:    s_load_dword s0, s[0:1], 0x24
59; GFX9-NEXT:    s_mov_b64 vcc, 0
60; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
61; GFX9-NEXT:    v_sub_u32_e32 v0, 0, v0
62; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
63; GFX9-NEXT:    v_mov_b32_e32 v1, s0
64; GFX9-NEXT:    v_div_fmas_f32 v2, v1, v1, v1
65; GFX9-NEXT:    v_mov_b32_e32 v1, 0x7b
66; GFX9-NEXT:    ds_write_b32 v0, v1 offset:12
67; GFX9-NEXT:    v_mov_b32_e32 v0, 0
68; GFX9-NEXT:    v_mov_b32_e32 v1, 0
69; GFX9-NEXT:    global_store_dword v[0:1], v2, off
70; GFX9-NEXT:    s_waitcnt vmcnt(0)
71; GFX9-NEXT:    s_endpgm
72entry:
73  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #1
74  %sub1 = sub i32 0, %x.i
75  %tmp0 = getelementptr [256 x i32], [256 x i32] addrspace(3)* @lds.obj, i32 0, i32 %sub1
76  %arrayidx = getelementptr inbounds i32, i32 addrspace(3)* %tmp0, i32 3
77  store i32 123, i32 addrspace(3)* %arrayidx
78  %fmas = call float @llvm.amdgcn.div.fmas.f32(float %dummy.val, float %dummy.val, float %dummy.val, i1 false)
79  store volatile float %fmas, float addrspace(1)* null
80  ret void
81}
82
83define amdgpu_kernel void @add_x_shl_neg_to_sub_max_offset() #1 {
84; CI-LABEL: add_x_shl_neg_to_sub_max_offset:
85; CI:       ; %bb.0:
86; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
87; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
88; CI-NEXT:    v_mov_b32_e32 v1, 13
89; CI-NEXT:    s_mov_b32 m0, -1
90; CI-NEXT:    ds_write_b8 v0, v1 offset:65535
91; CI-NEXT:    s_endpgm
92;
93; GFX9-LABEL: add_x_shl_neg_to_sub_max_offset:
94; GFX9:       ; %bb.0:
95; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
96; GFX9-NEXT:    v_sub_u32_e32 v0, 0, v0
97; GFX9-NEXT:    v_mov_b32_e32 v1, 13
98; GFX9-NEXT:    ds_write_b8 v0, v1 offset:65535
99; GFX9-NEXT:    s_endpgm
100  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
101  %neg = sub i32 0, %x.i
102  %shl = shl i32 %neg, 2
103  %add = add i32 65535, %shl
104  %ptr = inttoptr i32 %add to i8 addrspace(3)*
105  store i8 13, i8 addrspace(3)* %ptr
106  ret void
107}
108
109define amdgpu_kernel void @add_x_shl_neg_to_sub_max_offset_p1() #1 {
110; CI-LABEL: add_x_shl_neg_to_sub_max_offset_p1:
111; CI:       ; %bb.0:
112; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
113; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0x10000, v0
114; CI-NEXT:    v_mov_b32_e32 v1, 13
115; CI-NEXT:    s_mov_b32 m0, -1
116; CI-NEXT:    ds_write_b8 v0, v1
117; CI-NEXT:    s_endpgm
118;
119; GFX9-LABEL: add_x_shl_neg_to_sub_max_offset_p1:
120; GFX9:       ; %bb.0:
121; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
122; GFX9-NEXT:    v_sub_u32_e32 v0, 0x10000, v0
123; GFX9-NEXT:    v_mov_b32_e32 v1, 13
124; GFX9-NEXT:    ds_write_b8 v0, v1
125; GFX9-NEXT:    s_endpgm
126  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
127  %neg = sub i32 0, %x.i
128  %shl = shl i32 %neg, 2
129  %add = add i32 65536, %shl
130  %ptr = inttoptr i32 %add to i8 addrspace(3)*
131  store i8 13, i8 addrspace(3)* %ptr
132  ret void
133}
134
135define amdgpu_kernel void @add_x_shl_neg_to_sub_multi_use() #1 {
136; CI-LABEL: add_x_shl_neg_to_sub_multi_use:
137; CI:       ; %bb.0:
138; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
139; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
140; CI-NEXT:    v_mov_b32_e32 v1, 13
141; CI-NEXT:    s_mov_b32 m0, -1
142; CI-NEXT:    ds_write_b32 v0, v1 offset:123
143; CI-NEXT:    ds_write_b32 v0, v1 offset:456
144; CI-NEXT:    s_endpgm
145;
146; GFX9-LABEL: add_x_shl_neg_to_sub_multi_use:
147; GFX9:       ; %bb.0:
148; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
149; GFX9-NEXT:    v_sub_u32_e32 v0, 0, v0
150; GFX9-NEXT:    v_mov_b32_e32 v1, 13
151; GFX9-NEXT:    ds_write_b32 v0, v1 offset:123
152; GFX9-NEXT:    ds_write_b32 v0, v1 offset:456
153; GFX9-NEXT:    s_endpgm
154  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
155  %neg = sub i32 0, %x.i
156  %shl = shl i32 %neg, 2
157  %add0 = add i32 123, %shl
158  %add1 = add i32 456, %shl
159  %ptr0 = inttoptr i32 %add0 to i32 addrspace(3)*
160  store volatile i32 13, i32 addrspace(3)* %ptr0
161  %ptr1 = inttoptr i32 %add1 to i32 addrspace(3)*
162  store volatile i32 13, i32 addrspace(3)* %ptr1
163  ret void
164}
165
166define amdgpu_kernel void @add_x_shl_neg_to_sub_multi_use_same_offset() #1 {
167; CI-LABEL: add_x_shl_neg_to_sub_multi_use_same_offset:
168; CI:       ; %bb.0:
169; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
170; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0, v0
171; CI-NEXT:    v_mov_b32_e32 v1, 13
172; CI-NEXT:    s_mov_b32 m0, -1
173; CI-NEXT:    ds_write_b32 v0, v1 offset:123
174; CI-NEXT:    ds_write_b32 v0, v1 offset:123
175; CI-NEXT:    s_endpgm
176;
177; GFX9-LABEL: add_x_shl_neg_to_sub_multi_use_same_offset:
178; GFX9:       ; %bb.0:
179; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
180; GFX9-NEXT:    v_sub_u32_e32 v0, 0, v0
181; GFX9-NEXT:    v_mov_b32_e32 v1, 13
182; GFX9-NEXT:    ds_write_b32 v0, v1 offset:123
183; GFX9-NEXT:    ds_write_b32 v0, v1 offset:123
184; GFX9-NEXT:    s_endpgm
185  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
186  %neg = sub i32 0, %x.i
187  %shl = shl i32 %neg, 2
188  %add = add i32 123, %shl
189  %ptr = inttoptr i32 %add to i32 addrspace(3)*
190  store volatile i32 13, i32 addrspace(3)* %ptr
191  store volatile i32 13, i32 addrspace(3)* %ptr
192  ret void
193}
194
195define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset() #1 {
196; CI-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset:
197; CI:       ; %bb.0:
198; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
199; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0x3fb, v0
200; CI-NEXT:    v_mov_b32_e32 v1, 0x7b
201; CI-NEXT:    v_mov_b32_e32 v2, 0
202; CI-NEXT:    s_mov_b32 m0, -1
203; CI-NEXT:    ds_write2_b32 v0, v1, v2 offset1:1
204; CI-NEXT:    s_endpgm
205;
206; GFX9-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset:
207; GFX9:       ; %bb.0:
208; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
209; GFX9-NEXT:    v_sub_u32_e32 v0, 0x3fb, v0
210; GFX9-NEXT:    v_mov_b32_e32 v1, 0x7b
211; GFX9-NEXT:    v_mov_b32_e32 v2, 0
212; GFX9-NEXT:    ds_write2_b32 v0, v1, v2 offset1:1
213; GFX9-NEXT:    s_endpgm
214  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
215  %neg = sub i32 0, %x.i
216  %shl = shl i32 %neg, 2
217  %add = add i32 1019, %shl
218  %ptr = inttoptr i32 %add to i64 addrspace(3)*
219  store i64 123, i64 addrspace(3)* %ptr, align 4
220  ret void
221}
222
223define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset_clamp_bit(float %dummy.val) #1 {
224; CI-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset_clamp_bit:
225; CI:       ; %bb.0:
226; CI-NEXT:    s_load_dword s0, s[0:1], 0x9
227; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
228; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0x3fb, v0
229; CI-NEXT:    s_mov_b64 vcc, 0
230; CI-NEXT:    v_mov_b32_e32 v2, 0x7b
231; CI-NEXT:    s_waitcnt lgkmcnt(0)
232; CI-NEXT:    v_mov_b32_e32 v1, s0
233; CI-NEXT:    s_mov_b32 s0, 0
234; CI-NEXT:    v_div_fmas_f32 v1, v1, v1, v1
235; CI-NEXT:    v_mov_b32_e32 v3, 0
236; CI-NEXT:    s_mov_b32 m0, -1
237; CI-NEXT:    s_mov_b32 s3, 0xf000
238; CI-NEXT:    s_mov_b32 s2, -1
239; CI-NEXT:    s_mov_b32 s1, s0
240; CI-NEXT:    ds_write2_b32 v0, v2, v3 offset1:1
241; CI-NEXT:    buffer_store_dword v1, off, s[0:3], 0
242; CI-NEXT:    s_waitcnt vmcnt(0)
243; CI-NEXT:    s_endpgm
244;
245; GFX9-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset_clamp_bit:
246; GFX9:       ; %bb.0:
247; GFX9-NEXT:    s_load_dword s0, s[0:1], 0x24
248; GFX9-NEXT:    s_mov_b64 vcc, 0
249; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
250; GFX9-NEXT:    v_sub_u32_e32 v0, 0x3fb, v0
251; GFX9-NEXT:    v_mov_b32_e32 v3, 0
252; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
253; GFX9-NEXT:    v_mov_b32_e32 v1, s0
254; GFX9-NEXT:    v_div_fmas_f32 v2, v1, v1, v1
255; GFX9-NEXT:    v_mov_b32_e32 v1, 0x7b
256; GFX9-NEXT:    ds_write2_b32 v0, v1, v3 offset1:1
257; GFX9-NEXT:    v_mov_b32_e32 v0, 0
258; GFX9-NEXT:    v_mov_b32_e32 v1, 0
259; GFX9-NEXT:    global_store_dword v[0:1], v2, off
260; GFX9-NEXT:    s_waitcnt vmcnt(0)
261; GFX9-NEXT:    s_endpgm
262  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
263  %neg = sub i32 0, %x.i
264  %shl = shl i32 %neg, 2
265  %add = add i32 1019, %shl
266  %ptr = inttoptr i32 %add to i64 addrspace(3)*
267  store i64 123, i64 addrspace(3)* %ptr, align 4
268  %fmas = call float @llvm.amdgcn.div.fmas.f32(float %dummy.val, float %dummy.val, float %dummy.val, i1 false)
269  store volatile float %fmas, float addrspace(1)* null
270  ret void
271}
272
273define amdgpu_kernel void @add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1() #1 {
274; CI-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1:
275; CI:       ; %bb.0:
276; CI-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
277; CI-NEXT:    v_sub_i32_e32 v0, vcc, 0x3fc, v0
278; CI-NEXT:    v_mov_b32_e32 v1, 0x7b
279; CI-NEXT:    v_mov_b32_e32 v2, 0
280; CI-NEXT:    s_mov_b32 m0, -1
281; CI-NEXT:    ds_write2_b32 v0, v1, v2 offset1:1
282; CI-NEXT:    s_endpgm
283;
284; GFX9-LABEL: add_x_shl_neg_to_sub_misaligned_i64_max_offset_p1:
285; GFX9:       ; %bb.0:
286; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 2, v0
287; GFX9-NEXT:    v_sub_u32_e32 v0, 0x3fc, v0
288; GFX9-NEXT:    v_mov_b32_e32 v1, 0x7b
289; GFX9-NEXT:    v_mov_b32_e32 v2, 0
290; GFX9-NEXT:    ds_write2_b32 v0, v1, v2 offset1:1
291; GFX9-NEXT:    s_endpgm
292  %x.i = call i32 @llvm.amdgcn.workitem.id.x() #0
293  %neg = sub i32 0, %x.i
294  %shl = shl i32 %neg, 2
295  %add = add i32 1020, %shl
296  %ptr = inttoptr i32 %add to i64 addrspace(3)*
297  store i64 123, i64 addrspace(3)* %ptr, align 4
298  ret void
299}

declare float @llvm.amdgcn.div.fmas.f32(float, float, float, i1)

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }
attributes #2 = { nounwind convergent }
