1; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032 %s
2; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1064 %s
3; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032 %s
4; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1064 %s
5; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032,GFX10DEFWAVE %s
6
; GCN-LABEL: {{^}}test_vopc_i32:
; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc_lo
; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc{{$}}
; Per-lane i32 compare + select: wave32 must use vcc_lo, wave64 the full vcc.
define amdgpu_kernel void @test_vopc_i32(i32 addrspace(1)* %arg) {
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %ptr = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %tid
  %val = load i32, i32 addrspace(1)* %ptr, align 4
  %is.pos = icmp sgt i32 %val, 0
  %res = select i1 %is.pos, i32 1, i32 2
  store i32 %res, i32 addrspace(1)* %ptr, align 4
  ret void
}
21
; GCN-LABEL: {{^}}test_vopc_f32:
; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc_lo
; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc{{$}}
; Per-lane f32 unordered compare + select: vcc_lo in wave32, vcc in wave64.
define amdgpu_kernel void @test_vopc_f32(float addrspace(1)* %arg) {
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %ptr = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tid
  %val = load float, float addrspace(1)* %ptr, align 4
  %cc = fcmp ugt float %val, 0.0
  %res = select i1 %cc, float 1.0, float 2.0
  store float %res, float addrspace(1)* %ptr, align 4
  ret void
}
36
; GCN-LABEL: {{^}}test_vopc_vcmp:
; GFX1032: v_cmp_nle_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1064: v_cmp_nle_f32_e32 vcc, 0, v{{[0-9]+}}
; Checks that the compare feeding amdgcn.kill is emitted with the inverted
; (nle) condition into the wave-size-appropriate vcc register.
define amdgpu_ps void @test_vopc_vcmp(float %x) {
  %cmp = fcmp oge float %x, 0.0
  call void @llvm.amdgcn.kill(i1 %cmp)
  ret void
}
45
; GCN-LABEL: {{^}}test_vopc_2xf16:
; GFX1032: v_cmp_le_f16_sdwa [[SC:vcc_lo|s[0-9]+]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]]
; GFX1064: v_cmp_le_f16_sdwa [[SC:vcc|s\[[0-9:]+\]]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]]
; SDWA f16 compare of the high half of a <2 x half>, select over the vector.
define amdgpu_kernel void @test_vopc_2xf16(<2 x half> addrspace(1)* %arg) {
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %ptr = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i32 %tid
  %vec = load <2 x half>, <2 x half> addrspace(1)* %ptr, align 4
  %hi = extractelement <2 x half> %vec, i32 1
  %cc = fcmp ugt half %hi, 0.0
  %res = select i1 %cc, <2 x half> <half 1.0, half 1.0>, <2 x half> %vec
  store <2 x half> %res, <2 x half> addrspace(1)* %ptr, align 4
  ret void
}
61
; GCN-LABEL: {{^}}test_vopc_class:
; GFX1032: v_cmp_class_f32_e64 [[C:vcc_lo|s[0-9:]+]], s{{[0-9]+}}, 0x204
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]
; GFX1064: v_cmp_class_f32_e64 [[C:vcc|s\[[0-9:]+\]]], s{{[0-9]+}}, 0x204
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]{{$}}
; fabs(x) == +inf lowers to a v_cmp_class test with a 32-bit or 64-bit mask.
define amdgpu_kernel void @test_vopc_class(i32 addrspace(1)* %out, float %x) #0 {
  %abs = tail call float @llvm.fabs.f32(float %x)
  %is.inf = fcmp oeq float %abs, 0x7FF0000000000000
  %res = zext i1 %is.inf to i32
  store i32 %res, i32 addrspace(1)* %out, align 4
  ret void
}
74
; GCN-LABEL: {{^}}test_vcmp_vcnd_f16:
; GFX1032: v_cmp_neq_f16_e64 [[C:vcc_lo|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]

; GFX1064: v_cmp_neq_f16_e64 [[C:vcc|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]{{$}}
; f16 compare against +inf feeding a cndmask; condition register width
; follows the wave size.
define amdgpu_kernel void @test_vcmp_vcnd_f16(half addrspace(1)* %out, half %x) #0 {
  %is.inf = fcmp oeq half %x, 0x7FF0000000000000
  %res = select i1 %is.inf, half 1.0, half %x
  store half %res, half addrspace(1)* %out, align 2
  ret void
}
87
; GCN-LABEL: {{^}}test_vop3_cmp_f32_sop_and:
; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cmp_nle_f32_e64 [[C2:s[0-9]+]], 1.0, v{{[0-9]+}}
; GFX1032: s_and_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]]
; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cmp_nle_f32_e64 [[C2:s\[[0-9:]+\]]], 1.0, v{{[0-9]+}}
; GFX1064: s_and_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]]
; Two f32 compares combined with a scalar AND of the right mask width.
define amdgpu_kernel void @test_vop3_cmp_f32_sop_and(float addrspace(1)* %arg) {
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %ptr = getelementptr inbounds float, float addrspace(1)* %arg, i32 %tid
  %val = load float, float addrspace(1)* %ptr, align 4
  %gt0 = fcmp ugt float %val, 0.0
  %lt1 = fcmp ult float %val, 1.0
  %both = and i1 %gt0, %lt1
  %res = select i1 %both, float 1.0, float 2.0
  store float %res, float addrspace(1)* %ptr, align 4
  ret void
}
108
; GCN-LABEL: {{^}}test_vop3_cmp_i32_sop_xor:
; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cmp_gt_i32_e64 [[C2:s[0-9]+]], 1, v{{[0-9]+}}
; GFX1032: s_xor_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cmp_gt_i32_e64 [[C2:s\[[0-9:]+\]]], 1, v{{[0-9]+}}
; GFX1064: s_xor_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
; Two i32 compares combined with a scalar XOR of the right mask width.
define amdgpu_kernel void @test_vop3_cmp_i32_sop_xor(i32 addrspace(1)* %arg) {
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %ptr = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %tid
  %val = load i32, i32 addrspace(1)* %ptr, align 4
  %gt0 = icmp sgt i32 %val, 0
  %lt1 = icmp slt i32 %val, 1
  %ne = xor i1 %gt0, %lt1
  %res = select i1 %ne, i32 1, i32 2
  store i32 %res, i32 addrspace(1)* %ptr, align 4
  ret void
}
129
; GCN-LABEL: {{^}}test_vop3_cmp_u32_sop_or:
; GFX1032: v_cmp_lt_u32_e32 vcc_lo, 3, v{{[0-9]+}}
; GFX1032: v_cmp_gt_u32_e64 [[C2:s[0-9]+]], 2, v{{[0-9]+}}
; GFX1032: s_or_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
; GFX1064: v_cmp_lt_u32_e32 vcc, 3, v{{[0-9]+}}
; GFX1064: v_cmp_gt_u32_e64 [[C2:s\[[0-9:]+\]]], 2, v{{[0-9]+}}
; GFX1064: s_or_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
; Two u32 compares combined with a scalar OR of the right mask width.
define amdgpu_kernel void @test_vop3_cmp_u32_sop_or(i32 addrspace(1)* %arg) {
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %ptr = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %tid
  %val = load i32, i32 addrspace(1)* %ptr, align 4
  %gt3 = icmp ugt i32 %val, 3
  %lt2 = icmp ult i32 %val, 2
  %either = or i1 %gt3, %lt2
  %res = select i1 %either, i32 1, i32 2
  store i32 %res, i32 addrspace(1)* %ptr, align 4
  ret void
}
150
; GCN-LABEL: {{^}}test_mask_if:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN: s_cbranch_execz
; A divergent if: the exec mask save/restore must use 32-bit ops in wave32
; and 64-bit ops in wave64.
define amdgpu_kernel void @test_mask_if(i32 addrspace(1)* %arg) #0 {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %cmp = icmp ugt i32 %lid, 10
  br i1 %cmp, label %if, label %endif

if:
  store i32 0, i32 addrspace(1)* %arg, align 4
  br label %endif

endif:
  ret void
}
167
; GCN-LABEL: {{^}}test_loop_with_if:
; GFX1032: s_or_b32 s{{[0-9]+}}, vcc_lo, s{{[0-9]+}}
; GFX1032: s_andn2_b32 exec_lo, exec_lo, s{{[0-9]+}}
; GFX1064: s_or_b64 s[{{[0-9:]+}}], vcc, s[{{[0-9:]+}}]
; GFX1064: s_andn2_b64 exec, exec, s[{{[0-9:]+}}]
; GCN:     s_cbranch_execz
; GCN:   .LBB{{.*}}:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN:     s_cbranch_execz
; GCN:   ; %bb.{{[0-9]+}}:
; GCN:   .LBB{{.*}}:
; GFX1032: s_xor_b32 s{{[0-9]+}}, exec_lo, s{{[0-9]+}}
; GFX1064: s_xor_b64 s[{{[0-9:]+}}], exec, s[{{[0-9:]+}}]
; GCN:   ; %bb.{{[0-9]+}}:
; GCN:   ; %bb.{{[0-9]+}}:
; GFX1032: s_or_b32 exec_lo, exec_lo, s{{[0-9]+}}
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: s_or_b64 exec, exec, s[{{[0-9:]+}}]
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}}
; GCN:     s_cbranch_execz .LBB
; GCN:   ; %bb.{{[0-9]+}}:
; GCN:   .LBB{{.*}}:
; GCN:     s_endpgm
; A loop containing a divergent if/else: checks the whole exec-mask
; bookkeeping sequence (loop break accumulation, saveexec, xor for the
; else path, mask restore) in both wave sizes.
define amdgpu_kernel void @test_loop_with_if(i32 addrspace(1)* %arg) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb2

bb1:
  ret void

bb2:
  ; Loop header: %tmp3 is the induction variable.
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp15, %bb13 ]
  %tmp4 = icmp slt i32 %tmp3, %tmp
  br i1 %tmp4, label %bb5, label %bb11

bb5:
  %tmp6 = sext i32 %tmp3 to i64
  %tmp7 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp6
  %tmp8 = load i32, i32 addrspace(1)* %tmp7, align 4
  %tmp9 = icmp sgt i32 %tmp8, 10
  br i1 %tmp9, label %bb10, label %bb11

bb10:
  store i32 %tmp, i32 addrspace(1)* %tmp7, align 4
  br label %bb13

bb11:
  %tmp12 = sdiv i32 %tmp3, 2
  br label %bb13

bb13:
  ; Loop latch: merge of the two paths; exits once %tmp14 reaches 255.
  %tmp14 = phi i32 [ %tmp3, %bb10 ], [ %tmp12, %bb11 ]
  %tmp15 = add nsw i32 %tmp14, 1
  %tmp16 = icmp slt i32 %tmp14, 255
  br i1 %tmp16, label %bb2, label %bb1
}
226
; GCN-LABEL: {{^}}test_loop_with_if_else_break:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN:     s_cbranch_execz
; GCN:   ; %bb.{{[0-9]+}}: ; %.preheader
; GCN:   .LBB{{.*}}:

; GCN:     global_store_dword
; GFX1032: s_or_b32 [[MASK0:s[0-9]+]], [[MASK0]], vcc_lo
; GFX1064: s_or_b64 [[MASK0:s\[[0-9:]+\]]], [[MASK0]], vcc
; GFX1032: s_andn2_b32 [[MASK1:s[0-9]+]], [[MASK1]], exec_lo
; GFX1064: s_andn2_b64 [[MASK1:s\[[0-9:]+\]]], [[MASK1]], exec
; GFX1032: s_and_b32 [[MASK0]], [[MASK0]], exec_lo
; GFX1064: s_and_b64 [[MASK0]], [[MASK0]], exec
; GFX1032: s_or_b32 [[MASK1]], [[MASK1]], [[MASK0]]
; GFX1064: s_or_b64 [[MASK1]], [[MASK1]], [[MASK0]]
; GCN:   .LBB{{.*}}: ; %Flow
; GFX1032: s_and_b32 [[TMP0:s[0-9]+]], exec_lo, [[MASK1]]
; GFX1064: s_and_b64 [[TMP0:s\[[0-9:]+\]]], exec, [[MASK1]]
; GFX1032: s_or_b32  [[ACC:s[0-9]+]], [[TMP0]], [[ACC]]
; GFX1064: s_or_b64  [[ACC:s\[[0-9:]+\]]], [[TMP0]], [[ACC]]
; GFX1032: s_andn2_b32 exec_lo, exec_lo, [[ACC]]
; GFX1064: s_andn2_b64 exec, exec, [[ACC]]
; GCN:     s_cbranch_execz
; GCN:   .LBB{{.*}}:

; GFX1032-DAG: s_or_b32 [[MASK1]], [[MASK1]], exec_lo
; GFX1064-DAG: s_or_b64 [[MASK1]], [[MASK1]], exec
; GCN-DAG: global_load_dword [[LOAD:v[0-9]+]]
; GFX1032: v_cmp_gt_i32_e32 vcc_lo, 11, [[LOAD]]
; GFX1064: v_cmp_gt_i32_e32 vcc, 11, [[LOAD]]
; A loop with two distinct break conditions: checks the per-iteration
; break-mask accumulation and exec-mask narrowing in both wave sizes.
define amdgpu_kernel void @test_loop_with_if_else_break(i32 addrspace(1)* %arg) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = icmp eq i32 %tmp, 0
  br i1 %tmp1, label %.loopexit, label %.preheader

.preheader:
  br label %bb2

bb2:
  ; Loop body: load arg[%tmp3]; break if the value is <= 10.
  %tmp3 = phi i32 [ %tmp9, %bb8 ], [ 0, %.preheader ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp4
  %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4
  %tmp7 = icmp sgt i32 %tmp6, 10
  br i1 %tmp7, label %bb8, label %.loopexit

bb8:
  ; Second break condition: iteration count reaches 256 or the thread id.
  store i32 %tmp, i32 addrspace(1)* %tmp5, align 4
  %tmp9 = add nuw nsw i32 %tmp3, 1
  %tmp10 = icmp ult i32 %tmp9, 256
  %tmp11 = icmp ult i32 %tmp9, %tmp
  %tmp12 = and i1 %tmp10, %tmp11
  br i1 %tmp12, label %bb2, label %.loopexit

.loopexit:
  ret void
}
286
; GCN-LABEL: {{^}}test_addc_vop2b:
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, s{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}, vcc_lo
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}, vcc{{$}}
; 64-bit add expands to add + add-with-carry through the wave-sized vcc.
define amdgpu_kernel void @test_addc_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %ptr = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tid
  %val = load i64, i64 addrspace(1)* %ptr, align 8
  %sum = add nsw i64 %val, %arg1
  store i64 %sum, i64 addrspace(1)* %ptr, align 8
  ret void
}
301
; GCN-LABEL: {{^}}test_subbrev_vop2b:
; GFX1032: v_sub_co_u32 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], v{{[0-9]+}}, s{{[0-9]+}}{{$}}
; GFX1032: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}}
; GFX1064: v_sub_co_u32 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], v{{[0-9]+}}, s{{[0-9]+}}{{$}}
; GFX1064: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}}
; 64-bit subtract (memory - scalar) expands to sub + reversed sub-with-borrow.
define amdgpu_kernel void @test_subbrev_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %ptr = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tid
  %val = load i64, i64 addrspace(1)* %ptr, align 8
  %diff = sub nsw i64 %val, %arg1
  store i64 %diff, i64 addrspace(1)* %ptr, align 8
  ret void
}
316
; GCN-LABEL: {{^}}test_subb_vop2b:
; GFX1032: v_sub_co_u32 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], s{{[0-9]+}}, v{{[0-9]+}}{{$}}
; GFX1032: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}}
; GFX1064: v_sub_co_u32 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], s{{[0-9]+}}, v{{[0-9]+}}{{$}}
; GFX1064: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}}
; 64-bit subtract (scalar - memory) expands to sub + sub-with-borrow.
define amdgpu_kernel void @test_subb_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %ptr = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tid
  %val = load i64, i64 addrspace(1)* %ptr, align 8
  %diff = sub nsw i64 %arg1, %val
  store i64 %diff, i64 addrspace(1)* %ptr, align 8
  ret void
}
331
; GCN-LABEL: {{^}}test_udiv64:
; GCN: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: s_addc_u32 s{{[0-9]+}}, 0, s{{[0-9]+}}
; GCN: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: s_addc_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: s_addc_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0
; GCN: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: s_addc_u32 s{{[0-9]+}}, 0, s{{[0-9]+}}
; GCN: s_addc_u32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; 64-bit unsigned division: arg[2] = arg[1] / arg[0]; exercises the
; scalar add/add-with-carry expansion shared by both wave sizes.
define amdgpu_kernel void @test_udiv64(i64 addrspace(1)* %arg) #0 {
bb:
  %num.ptr = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 1
  %num = load i64, i64 addrspace(1)* %num.ptr, align 8
  %den = load i64, i64 addrspace(1)* %arg, align 8
  %quot = udiv i64 %num, %den
  %out.ptr = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 2
  store i64 %quot, i64 addrspace(1)* %out.ptr, align 8
  ret void
}
352
; GCN-LABEL: {{^}}test_div_scale_f32:
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; v_div_scale_f32 writes its i1 result to a 32-bit or 64-bit SGPR mask.
define amdgpu_kernel void @test_div_scale_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1

  ; Volatile loads keep both operands live and ordered.
  %a = load volatile float, float addrspace(1)* %gep.a, align 4
  %b = load volatile float, float addrspace(1)* %gep.b, align 4

  %pair = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
  %scaled = extractvalue { float, i1 } %pair, 0
  store float %scaled, float addrspace(1)* %out, align 4
  ret void
}
369
; GCN-LABEL: {{^}}test_div_scale_f64:
; GFX1032: v_div_scale_f64 v[{{[0-9:]+}}], s{{[0-9]+}}, v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
; GFX1064: v_div_scale_f64 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
; v_div_scale_f64 writes its i1 result to a 32-bit or 64-bit SGPR mask.
define amdgpu_kernel void @test_div_scale_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.a = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.b = getelementptr double, double addrspace(1)* %gep.a, i32 1

  ; Volatile loads keep both operands live and ordered.
  %a = load volatile double, double addrspace(1)* %gep.a, align 8
  %b = load volatile double, double addrspace(1)* %gep.b, align 8

  %pair = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
  %scaled = extractvalue { double, i1 } %pair, 0
  store double %scaled, double addrspace(1)* %out, align 8
  ret void
}
386
; GCN-LABEL: {{^}}test_mad_i64_i32:
; GFX1032: v_mad_i64_i32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
; GFX1064: v_mad_i64_i32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
; sext-mul-add matches v_mad_i64_i32; carry SGPR width follows wave size.
define i64 @test_mad_i64_i32(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
  %a.wide = sext i32 %arg0 to i64
  %b.wide = sext i32 %arg1 to i64
  %prod = mul i64 %a.wide, %b.wide
  %sum = add i64 %prod, %arg2
  ret i64 %sum
}
397
; GCN-LABEL: {{^}}test_mad_u64_u32:
; GFX1032: v_mad_u64_u32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
; GFX1064: v_mad_u64_u32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
; zext-mul-add matches v_mad_u64_u32; carry SGPR width follows wave size.
define i64 @test_mad_u64_u32(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
  %a.wide = zext i32 %arg0 to i64
  %b.wide = zext i32 %arg1 to i64
  %prod = mul i64 %a.wide, %b.wide
  %sum = add i64 %prod, %arg2
  ret i64 %sum
}
408
; GCN-LABEL: {{^}}test_div_fmas_f32:
; GCN:     s_bitcmp1_b32 s{{[0-9]+}}, 0
; GFX1032: s_cselect_b32 vcc_lo, -1, 0
; GFX1064: s_cselect_b64 vcc, -1, 0
; GCN:     v_div_fmas_f32 v{{[0-9]+}}, {{[vs][0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; The i1 operand of div.fmas is materialized into the wave-sized vcc.
define amdgpu_kernel void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
  %fmas = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %d) nounwind readnone
  store float %fmas, float addrspace(1)* %out, align 4
  ret void
}
419
; GCN-LABEL: {{^}}test_div_fmas_f64:
; GCN:     s_bitcmp1_b32 s{{[0-9]+}}, 0
; GFX1032: s_cselect_b32 vcc_lo, -1, 0
; GFX1064: s_cselect_b64 vcc, -1, 0
; GCN-DAG: v_div_fmas_f64 v[{{[0-9:]+}}], {{[vs]}}[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
; The i1 operand of div.fmas.f64 is materialized into the wave-sized vcc.
define amdgpu_kernel void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c, i1 %d) nounwind {
  %fmas = call double @llvm.amdgcn.div.fmas.f64(double %a, double %b, double %c, i1 %d) nounwind readnone
  store double %fmas, double addrspace(1)* %out, align 8
  ret void
}
430
; GCN-LABEL: {{^}}test_div_fmas_f32_i1_phi_vcc:
; GFX1032: s_mov_b32 [[VCC:vcc_lo]], 0{{$}}
; GFX1064: s_mov_b64 [[VCC:vcc]], 0{{$}}
; GFX1032: s_and_saveexec_b32 [[SAVE:s[0-9]+]], s{{[0-9]+}}{{$}}
; GFX1064: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], s[{{[0-9:]+}}]{{$}}

; GCN: load_dword [[LOAD:v[0-9]+]]
; GCN: v_cmp_ne_u32_e32 [[VCC]], 0, [[LOAD]]

; GCN: .LBB{{[0-9_]+}}:
; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE]]
; GFX1064: s_or_b64 exec, exec, [[SAVE]]
; GCN: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
; An i1 phi consumed by div.fmas must be carried in vcc across a divergent
; if: initialized to 0, conditionally overwritten by the compare in %bb.
define amdgpu_kernel void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.out = getelementptr float, float addrspace(1)* %out, i32 2
  %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1
  %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2

  %a = load float, float addrspace(1)* %gep.a
  %b = load float, float addrspace(1)* %gep.b
  %c = load float, float addrspace(1)* %gep.c

  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %bb, label %exit

bb:
  ; Volatile load keeps this block from being optimized away.
  %val = load volatile i32, i32 addrspace(1)* %dummy
  %cmp1 = icmp ne i32 %val, 0
  br label %exit

exit:
  %cond = phi i1 [false, %entry], [%cmp1, %bb]
  %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %cond) nounwind readnone
  store float %result, float addrspace(1)* %gep.out, align 4
  ret void
}
470
; GCN-LABEL: {{^}}fdiv_f32:
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: v_rcp_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}

; GCN-NOT: vcc
; GCN: v_div_fmas_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; Full f32 fdiv lowering: div_scale/rcp/div_fmas chain with the wave-sized
; condition register, and no stray vcc use in between.
define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float %a, float %b) #0 {
entry:
  %quot = fdiv float %a, %b
  store float %quot, float addrspace(1)* %out
  ret void
}
486
; GCN-LABEL: {{^}}test_br_cc_f16:
; GFX1032:  v_cmp_nlt_f16_e32 vcc_lo,
; GFX1064:  v_cmp_nlt_f16_e32 vcc,
; GCN-NEXT: s_cbranch_vccnz
; An f16 compare used directly as a branch condition: the inverted (nlt)
; compare feeds s_cbranch_vccnz through the wave-sized vcc.
define amdgpu_kernel void @test_br_cc_f16(
    half addrspace(1)* %r,
    half addrspace(1)* %a,
    half addrspace(1)* %b) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %b.val = load half, half addrspace(1)* %b
  %fcmp = fcmp olt half %a.val, %b.val
  br i1 %fcmp, label %one, label %two

one:
  store half %a.val, half addrspace(1)* %r
  ret void

two:
  store half %b.val, half addrspace(1)* %r
  ret void
}
509
; GCN-LABEL: {{^}}test_brcc_i1:
; GCN:      s_bitcmp0_b32 s{{[0-9]+}}, 0
; GCN-NEXT: s_cbranch_scc1
; A uniform i1 kernel argument branches via scalar bit-test + scc, the
; same in both wave sizes.
define amdgpu_kernel void @test_brcc_i1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i1 %val) #0 {
  %cmp0 = icmp ne i1 %val, 0
  br i1 %cmp0, label %store, label %end

store:
  store i32 222, i32 addrspace(1)* %out
  ret void

end:
  ret void
}
524
; GCN-LABEL: {{^}}test_preserve_condition_undef_flag:
; GFX1032-DAG: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0
; GFX1032-DAG: v_cmp_ngt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 0
; GFX1032: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0
; GFX1032: s_or_b32 [[OR1:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
; GFX1032: s_or_b32 [[OR2:s[0-9]+]], [[OR1]], s{{[0-9]+}}
; GFX1032: s_and_b32 vcc_lo, exec_lo, [[OR2]]
; GFX1064-DAG: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0
; GFX1064-DAG: v_cmp_ngt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 0
; GFX1064: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0
; GFX1064: s_or_b64 [[OR1:s\[[0-9:]+\]]], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
; GFX1064: s_or_b64 [[OR2:s\[[0-9:]+\]]], [[OR1]], s[{{[0-9:]+}}]
; GFX1064: s_and_b64 vcc, exec, [[OR2]]
; GCN:     s_cbranch_vccnz
; Combined inverted compares OR'ed into a single branch condition; the
; undef class-intrinsic operands must not break the lowering.
define amdgpu_kernel void @test_preserve_condition_undef_flag(float %arg, i32 %arg1, float %arg2) #0 {
bb0:
  %tmp = icmp sgt i32 %arg1, 4
  %undef = call i1 @llvm.amdgcn.class.f32(float undef, i32 undef)
  %tmp4 = select i1 %undef, float %arg, float 1.000000e+00
  %tmp5 = fcmp ogt float %arg2, 0.000000e+00
  %tmp6 = fcmp olt float %arg2, 1.000000e+00
  %tmp7 = fcmp olt float %arg, %tmp4
  %tmp8 = and i1 %tmp5, %tmp6
  %tmp9 = and i1 %tmp8, %tmp7
  br i1 %tmp9, label %bb1, label %bb2

bb1:
  store volatile i32 0, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}
558
; GCN-LABEL: {{^}}test_invert_true_phi_cond_break_loop:
; GFX1032: s_xor_b32 s{{[0-9]+}}, s{{[0-9]+}}, -1
; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: s_xor_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], -1
; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
; A loop whose continue condition comes from an i1 phi with a constant-true
; incoming value: the mask inversion (xor -1) and break accumulation must
; use wave-sized scalar ops.
define amdgpu_kernel void @test_invert_true_phi_cond_break_loop(i32 %arg) #0 {
bb:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp = sub i32 %id, %arg
  br label %bb1

bb1:                                              ; preds = %Flow, %bb
  %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
  %lsr.iv.next = add i32 %lsr.iv, 1
  %cmp0 = icmp slt i32 %lsr.iv.next, 0
  br i1 %cmp0, label %bb4, label %Flow

bb4:                                              ; preds = %bb1
  %load = load volatile i32, i32 addrspace(1)* undef, align 4
  %cmp1 = icmp sge i32 %tmp, %load
  br label %Flow

Flow:                                             ; preds = %bb4, %bb1
  %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
  %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
  br i1 %tmp3, label %bb1, label %bb9

bb9:                                              ; preds = %Flow
  store volatile i32 7, i32 addrspace(3)* undef
  ret void
}
590
; GCN-LABEL: {{^}}test_movrels_extract_neg_offset_vgpr:
; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 1, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc_lo
; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 2, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc_lo
; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 3, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc_lo
; GFX1064: v_cmp_eq_u32_e32 vcc, 1, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; GFX1064: v_cmp_ne_u32_e32 vcc, 2, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc
; GFX1064: v_cmp_ne_u32_e32 vcc, 3, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc
; Dynamic extract from a constant vector with a negative-offset VGPR index
; lowers to a compare/cndmask chain through the wave-sized vcc.
define amdgpu_kernel void @test_movrels_extract_neg_offset_vgpr(i32 addrspace(1)* %out) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx = add i32 %tid, -512
  %elt = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %idx
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}
612
; GCN-LABEL: {{^}}test_set_inactive:
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 42
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1064: s_not_b64 exec, exec{{$}}
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 42
; GFX1064: s_not_b64 exec, exec{{$}}
; set.inactive flips exec, writes the inactive-lane value, and flips back.
define amdgpu_kernel void @test_set_inactive(i32 addrspace(1)* %out, i32 %in) #0 {
  %masked = call i32 @llvm.amdgcn.set.inactive.i32(i32 %in, i32 42)
  store i32 %masked, i32 addrspace(1)* %out
  ret void
}
625
; GCN-LABEL: {{^}}test_set_inactive_64:
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1064: s_not_b64 exec, exec{{$}}
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1064: s_not_b64 exec, exec{{$}}
; 64-bit set.inactive needs two 32-bit moves between the exec flips.
define amdgpu_kernel void @test_set_inactive_64(i64 addrspace(1)* %out, i64 %in) #0 {
  %masked = call i64 @llvm.amdgcn.set.inactive.i64(i64 %in, i64 0)
  store i64 %masked, i64 addrspace(1)* %out
  ret void
}
640
; GCN-LABEL: {{^}}test_kill_i1_terminator_float:
; GFX1032: s_mov_b32 exec_lo, 0
; GFX1064: s_mov_b64 exec, 0
; kill(false) kills every lane: exec is simply cleared at wave size.
define amdgpu_ps void @test_kill_i1_terminator_float() #0 {
  call void @llvm.amdgcn.kill(i1 false)
  ret void
}
648
; GCN-LABEL: {{^}}test_kill_i1_terminator_i1:
; GFX1032: s_mov_b32 [[LIVE:s[0-9]+]], exec_lo
; GFX1032: s_or_b32 [[OR:s[0-9]+]],
; GFX1032: s_xor_b32 [[KILL:s[0-9]+]], [[OR]], exec_lo
; GFX1032: s_andn2_b32 [[MASK:s[0-9]+]], [[LIVE]], [[KILL]]
; GFX1032: s_and_b32 exec_lo, exec_lo, [[MASK]]
; GFX1064: s_mov_b64 [[LIVE:s\[[0-9:]+\]]], exec
; GFX1064: s_or_b64 [[OR:s\[[0-9:]+\]]],
; GFX1064: s_xor_b64 [[KILL:s\[[0-9:]+\]]], [[OR]], exec
; GFX1064: s_andn2_b64 [[MASK:s\[[0-9:]+\]]], [[LIVE]], [[KILL]]
; GFX1064: s_and_b64 exec, exec, [[MASK]]
; kill on a computed i1: the live-mask bookkeeping uses wave-sized ops.
define amdgpu_gs void @test_kill_i1_terminator_i1(i32 %a, i32 %b, i32 %c, i32 %d) #0 {
  %lt.ab = icmp slt i32 %a, %b
  %lt.cd = icmp slt i32 %c, %d
  %keep = or i1 %lt.ab, %lt.cd
  call void @llvm.amdgcn.kill(i1 %keep)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0, i1 false, i1 false)
  ret void
}
668
; GCN-LABEL: {{^}}test_loop_vcc:
; GFX1032: v_cmp_lt_f32_e32 vcc_lo,
; GFX1064: v_cmp_lt_f32_e32 vcc,
; GCN: s_cbranch_vccz
; A uniform-in-practice float loop counter branch through vcc; the loop
; body issues an image sample.
define amdgpu_ps <4 x float> @test_loop_vcc(<4 x float> %in) #0 {
entry:
  br label %loop

loop:
  ; %ctr.iv counts 0, 2, 4, ... and exits once it exceeds 7.0.
  %ctr.iv = phi float [ 0.0, %entry ], [ %ctr.next, %body ]
  %c.iv = phi <4 x float> [ %in, %entry ], [ %c.next, %body ]
  %cc = fcmp ogt float %ctr.iv, 7.0
  br i1 %cc, label %break, label %body

body:
  %c.iv0 = extractelement <4 x float> %c.iv, i32 0
  %c.next = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float %c.iv0, <8 x i32> undef, <4 x i32> undef, i1 0, i32 0, i32 0)
  %ctr.next = fadd float %ctr.iv, 2.0
  br label %loop

break:
  ret <4 x float> %c.iv
}
692
; NOTE: llvm.amdgcn.wwm is deprecated, use llvm.amdgcn.strict.wwm instead.
; GCN-LABEL: {{^}}test_wwm1:
; GFX1032: s_or_saveexec_b32 [[SAVE:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE]]
; GFX1064: s_or_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE]]
; WWM region: all-lanes exec save/restore uses wave-sized registers.
define amdgpu_ps float @test_wwm1(i32 inreg %idx0, i32 inreg %idx1, float %src0, float %src1) {
main_body:
  %sum = fadd float %src0, %src1
  %wwm = call float @llvm.amdgcn.wwm.f32(float %sum)
  ret float %wwm
}
705
; GCN-LABEL: {{^}}test_wwm2:
; GFX1032: v_cmp_gt_u32_e32 vcc_lo, 16, v{{[0-9]+}}
; GFX1032: s_and_saveexec_b32 [[SAVE1:s[0-9]+]], vcc_lo
; GFX1032: s_or_saveexec_b32 [[SAVE2:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE2]]
; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE1]]
; GFX1064: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
; GFX1064: s_and_saveexec_b64 [[SAVE1:s\[[0-9:]+\]]], vcc{{$}}
; GFX1064: s_or_saveexec_b64 [[SAVE2:s\[[0-9:]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE2]]
; GFX1064: s_or_b64 exec, exec, [[SAVE1]]
; WWM inside a divergent branch: both the branch saveexec and the WWM
; all-lanes saveexec must use wave-sized masks.
define amdgpu_ps float @test_wwm2(i32 inreg %idx) {
main_body:
  ; use mbcnt to make sure the branch is divergent
  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
  %cc = icmp uge i32 %hi, 16
  br i1 %cc, label %endif, label %if

if:
  %src = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i32 0, i32 0)
  %out = fadd float %src, %src
  %out.0 = call float @llvm.amdgcn.wwm.f32(float %out)
  %out.1 = fadd float %src, %out.0
  br label %endif

endif:
  %out.2 = phi float [ %out.1, %if ], [ 0.0, %main_body ]
  ret float %out.2
}
736
737; GCN-LABEL: {{^}}test_strict_wwm1:
738; GFX1032: s_or_saveexec_b32 [[SAVE:s[0-9]+]], -1
739; GFX1032: s_mov_b32 exec_lo, [[SAVE]]
740; GFX1064: s_or_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], -1
741; GFX1064: s_mov_b64 exec, [[SAVE]]
; Same shape as test_wwm1 but using the replacement intrinsic
; llvm.amdgcn.strict.wwm; expected exec save/restore codegen is identical.
define amdgpu_ps float @test_strict_wwm1(i32 inreg %idx0, i32 inreg %idx1, float %src0, float %src1) {
main_body:
  %out = fadd float %src0, %src1
  %out.0 = call float @llvm.amdgcn.strict.wwm.f32(float %out)
  ret float %out.0
}
748
749; GCN-LABEL: {{^}}test_strict_wwm2:
750; GFX1032: v_cmp_gt_u32_e32 vcc_lo, 16, v{{[0-9]+}}
751; GFX1032: s_and_saveexec_b32 [[SAVE1:s[0-9]+]], vcc_lo
752; GFX1032: s_or_saveexec_b32 [[SAVE2:s[0-9]+]], -1
753; GFX1032: s_mov_b32 exec_lo, [[SAVE2]]
754; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE1]]
755; GFX1064: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
756; GFX1064: s_and_saveexec_b64 [[SAVE1:s\[[0-9:]+\]]], vcc{{$}}
757; GFX1064: s_or_saveexec_b64 [[SAVE2:s\[[0-9:]+\]]], -1
758; GFX1064: s_mov_b64 exec, [[SAVE2]]
759; GFX1064: s_or_b64 exec, exec, [[SAVE1]]
; Same shape as test_wwm2 but with llvm.amdgcn.strict.wwm: strict-WWM exec
; save/restore nested inside a divergent-branch exec region.
define amdgpu_ps float @test_strict_wwm2(i32 inreg %idx) {
main_body:
  ; use mbcnt to make sure the branch is divergent
  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
  %cc = icmp uge i32 %hi, 16
  br i1 %cc, label %endif, label %if

if:
  %src = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i32 0, i32 0)
  %out = fadd float %src, %src
  %out.0 = call float @llvm.amdgcn.strict.wwm.f32(float %out)
  %out.1 = fadd float %src, %out.0
  br label %endif

endif:
  %out.2 = phi float [ %out.1, %if ], [ 0.0, %main_body ]
  ret float %out.2
}
779
780
781; GCN-LABEL: {{^}}test_wqm1:
782; GFX1032: s_mov_b32 [[ORIG:s[0-9]+]], exec_lo
783; GFX1032: s_wqm_b32 exec_lo, exec_lo
784; GFX1032: s_and_b32 exec_lo, exec_lo, [[ORIG]]
785; GFX1064: s_mov_b64 [[ORIG:s\[[0-9]+:[0-9]+\]]], exec{{$}}
786; GFX1064: s_wqm_b64 exec, exec{{$}}
787; GFX1064: s_and_b64 exec, exec, [[ORIG]]
; Pixel shader whose image sample requires whole-quad mode: codegen must save
; the original exec mask, enter WQM (s_wqm), and restore with s_and before
; exact-mode operations, using mask widths matching the wave mode.
define amdgpu_ps <4 x float> @test_wqm1(i32 inreg, i32 inreg, i32 inreg, i32 inreg %m0, <8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, <2 x float> %pos) #0 {
main_body:
  ; Interpolate two attribute channels from the input position; the interp
  ; intrinsics are what make WQM necessary for the derivative-using sample.
  %inst23 = extractelement <2 x float> %pos, i32 0
  %inst24 = extractelement <2 x float> %pos, i32 1
  %inst25 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 0, i32 0, i32 %m0)
  %inst26 = tail call float @llvm.amdgcn.interp.p2(float %inst25, float %inst24, i32 0, i32 0, i32 %m0)
  %inst28 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 1, i32 0, i32 %m0)
  %inst29 = tail call float @llvm.amdgcn.interp.p2(float %inst28, float %inst24, i32 1, i32 0, i32 %m0)
  %tex = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %inst26, float %inst29, <8 x i32> %rsrc, <4 x i32> %sampler, i1 0, i32 0, i32 0)
  ret <4 x float> %tex
}
799
800; GCN-LABEL: {{^}}test_wqm2:
801; GFX1032: s_wqm_b32 exec_lo, exec_lo
802; GFX1032: s_and_b32 exec_lo, exec_lo, s{{[0-9]+}}
803; GFX1064: s_wqm_b64 exec, exec{{$}}
804; GFX1064: s_and_b64 exec, exec, s[{{[0-9:]+}}]
; llvm.amdgcn.wqm on a value computed from buffer loads: expects s_wqm on
; entry and an s_and restoring exec to the live mask, wave-size-appropriate.
define amdgpu_ps float @test_wqm2(i32 inreg %idx0, i32 inreg %idx1) #0 {
main_body:
  %src0 = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx0, i32 0, i32 0, i32 0)
  %src1 = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx1, i32 0, i32 0, i32 0)
  %out = fadd float %src0, %src1
  ; Round-trip through i32 because the wqm intrinsic variant used here takes
  ; an integer operand.
  %out.0 = bitcast float %out to i32
  %out.1 = call i32 @llvm.amdgcn.wqm.i32(i32 %out.0)
  %out.2 = bitcast i32 %out.1 to float
  ret float %out.2
}
815
816; GCN-LABEL: {{^}}test_intr_fcmp_i64:
817; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}}
818; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
819; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
820; GFX1064:     v_cmp_eq_f32_e64 s[[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
821; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
822; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]]
823; GCN:         store_dwordx2 v{{[0-9]+}}, v[[[V_LO]]:[[V_HI]]], s
; llvm.amdgcn.fcmp returning i64 (full ballot): on wave32 the high half of the
; stored result must be materialized as literal 0; on wave64 both halves come
; from the 64-bit compare result. The fabs folds into the compare's |src|
; modifier (cond code 1 = oeq, per the v_cmp_eq checks above).
define amdgpu_kernel void @test_intr_fcmp_i64(i64 addrspace(1)* %out, float %src, float %a) {
  %temp = call float @llvm.fabs.f32(float %a)
  %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float %src, float %temp, i32 1)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}
830
831; GCN-LABEL: {{^}}test_intr_icmp_i64:
832; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}}
833; GFX1032-DAG: v_cmp_eq_u32_e64 [[C_LO:vcc_lo|s[0-9]+]], 0x64, {{s[0-9]+}}
834; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], [[C_LO]]
835; GFX1064:     v_cmp_eq_u32_e64 s[[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], 0x64, {{s[0-9]+}}
836; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
837; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]]
838; GCN:         store_dwordx2 v{{[0-9]+}}, v[[[V_LO]]:[[V_HI]]], s
; llvm.amdgcn.icmp returning i64: wave32 zero-fills the upper dword of the
; stored ballot, wave64 stores both halves of the 64-bit compare result.
; Cond code 32 = integer eq; 100 appears as the 0x64 literal in the checks.
define amdgpu_kernel void @test_intr_icmp_i64(i64 addrspace(1)* %out, i32 %src) {
  %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %src, i32 100, i32 32)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}
844
845; GCN-LABEL: {{^}}test_intr_fcmp_i32:
846; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
847; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
848; GFX1064:     v_cmp_eq_f32_e64 s[[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
849; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
850; GCN:         store_dword v{{[0-9]+}}, v[[V_LO]], s
; i32-returning variant of the fcmp ballot intrinsic: only the low dword of
; the compare mask is stored, on both wave sizes.
define amdgpu_kernel void @test_intr_fcmp_i32(i32 addrspace(1)* %out, float %src, float %a) {
  %temp = call float @llvm.fabs.f32(float %a)
  %result = call i32 @llvm.amdgcn.fcmp.i32.f32(float %src, float %temp, i32 1)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
857
858; GCN-LABEL: {{^}}test_intr_icmp_i32:
859; GFX1032-DAG: v_cmp_eq_u32_e64 s[[C_LO:[0-9]+]], 0x64, {{s[0-9]+}}
860; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}}
861; GFX1064:     v_cmp_eq_u32_e64 s[[[C_LO:[0-9]+]]:{{[0-9]+}}], 0x64, {{s[0-9]+}}
862; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}}
863; GCN:         store_dword v{{[0-9]+}}, v[[V_LO]], s
; i32-returning variant of the icmp ballot intrinsic: stores only the low
; dword of the compare mask, on both wave sizes.
define amdgpu_kernel void @test_intr_icmp_i32(i32 addrspace(1)* %out, i32 %src) {
  %result = call i32 @llvm.amdgcn.icmp.i32.i32(i32 %src, i32 100, i32 32)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}
869
870; GCN-LABEL: {{^}}test_wqm_vote:
871; GFX1032: v_cmp_neq_f32_e32 vcc_lo, 0
872; GFX1032: s_mov_b32 [[LIVE:s[0-9]+]], exec_lo
873; GFX1032: s_wqm_b32 [[WQM:s[0-9]+]], vcc_lo
874; GFX1032: s_xor_b32 [[KILL:s[0-9]+]], [[WQM]], exec_lo
875; GFX1032: s_andn2_b32 [[MASK:s[0-9]+]], [[LIVE]], [[KILL]]
876; GFX1032: s_and_b32 exec_lo, exec_lo, [[MASK]]
877; GFX1064: v_cmp_neq_f32_e32 vcc, 0
878; GFX1064: s_mov_b64 [[LIVE:s\[[0-9:]+\]]], exec
879; GFX1064: s_wqm_b64 [[WQM:s\[[0-9:]+\]]], vcc
880; GFX1064: s_xor_b64 [[KILL:s\[[0-9:]+\]]], [[WQM]], exec
881; GFX1064: s_andn2_b64 [[MASK:s\[[0-9:]+\]]], [[LIVE]], [[KILL]]
882; GFX1064: s_and_b64 exec, exec, [[MASK]]
; wqm.vote feeding kill: codegen must widen the compare mask to whole quads
; (s_wqm), derive the killed lanes via xor with exec, and apply them to the
; live mask (andn2 + and), with mask widths matching the wave mode.
define amdgpu_ps void @test_wqm_vote(float %a) {
  %c1 = fcmp une float %a, 0.0
  %c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1)
  call void @llvm.amdgcn.kill(i1 %c2)
  ; Export keeps the shader well-formed after the kill.
  call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0, i1 false, i1 false)
  ret void
}
890
891; GCN-LABEL: {{^}}test_branch_true:
892; GFX1032: s_mov_b32 vcc_lo, exec_lo
893; GFX1064: s_mov_b64 vcc, exec
; Constant-true branch compiled at optnone (attribute group #2): the always-
; taken condition must lower to copying exec into vcc (s_mov vcc, exec),
; sized per wave mode.
define amdgpu_kernel void @test_branch_true() #2 {
entry:
  br i1 true, label %for.end, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body, %for.body.lr.ph
  ; Dead loop (entry always branches to for.end); kept to exercise the
  ; branch lowering without simplification at optnone.
  br i1 undef, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}
907
908; GCN-LABEL: {{^}}test_ps_live:
909; GFX1032: s_mov_b32 [[C:s[0-9]+]], exec_lo
910; GFX1064: s_mov_b64 [[C:s\[[0-9:]+\]]], exec{{$}}
911; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]
; llvm.amdgcn.ps.live lowers to a copy of exec; zext of the per-lane bit then
; selects 0/1 via v_cndmask on that mask.
define amdgpu_ps float @test_ps_live() #0 {
  %live = call i1 @llvm.amdgcn.ps.live()
  %live.32 = zext i1 %live to i32
  %r = bitcast i32 %live.32 to float
  ret float %r
}
918
919; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle64:
920; GFX1032: v_cmp_neq_f64_e64 [[C:s[0-9]+]], s[{{[0-9:]+}}], 1.0
921; GFX1032: s_and_b32 vcc_lo, exec_lo, [[C]]
922; GFX1064: v_cmp_neq_f64_e64 [[C:s\[[0-9:]+\]]], s[{{[0-9:]+}}], 1.0
923; GFX1064: s_and_b64 vcc, exec, [[C]]
; Triangle-shaped if over a uniform f64 compare: after if-conversion the
; branch condition becomes s_and of exec with the (inverted, neq) compare
; result into vcc, with operand widths per wave mode.
define amdgpu_kernel void @test_vccnz_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
entry:
  %v = load double, double addrspace(1)* %in
  %cc = fcmp oeq double %v, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = fadd double %v, %v
  br label %endif

endif:
  ; r = (v == 1.0) ? v+v : v
  %r = phi double [ %v, %entry ], [ %u, %if ]
  store double %r, double addrspace(1)* %out
  ret void
}
939
940; GCN-LABEL: {{^}}test_vgprblocks_w32_attr:
941; Test that the wave size can be overridden in function attributes and that the block size is correct as a result
942; GFX10DEFWAVE: ; VGPRBlocks: 1
; Twelve VGPR float arguments with wave32 forced via attribute group #3
; ("target-features"="+wavefrontsize32"): the default-wave RUN line expects
; the VGPRBlocks field in the emitted metadata to be 1 for wave32 granularity.
define amdgpu_gs float @test_vgprblocks_w32_attr(float %a, float %b, float %c, float %d, float %e,
                                        float %f, float %g, float %h, float %i, float %j, float %k, float %l) #3 {
main_body:
  ; Chain of dependent adds keeps all twelve inputs live.
  %s = fadd float %a, %b
  %s.1 = fadd float %s, %c
  %s.2 = fadd float %s.1, %d
  %s.3 = fadd float %s.2, %e
  %s.4 = fadd float %s.3, %f
  %s.5 = fadd float %s.4, %g
  %s.6 = fadd float %s.5, %h
  %s.7 = fadd float %s.6, %i
  %s.8 = fadd float %s.7, %j
  %s.9 = fadd float %s.8, %k
  %s.10 = fadd float %s.9, %l
  ret float %s.10
}
959
960; GCN-LABEL: {{^}}test_vgprblocks_w64_attr:
961; Test that the wave size can be overridden in function attributes and that the block size is correct as a result
962; GFX10DEFWAVE: ; VGPRBlocks: 2
; Same body as test_vgprblocks_w32_attr but with wave64 forced via attribute
; group #4 ("target-features"="+wavefrontsize64"): expects VGPRBlocks = 2,
; reflecting wave64 VGPR allocation granularity.
define amdgpu_gs float @test_vgprblocks_w64_attr(float %a, float %b, float %c, float %d, float %e,
                                        float %f, float %g, float %h, float %i, float %j, float %k, float %l) #4 {
main_body:
  ; Chain of dependent adds keeps all twelve inputs live.
  %s = fadd float %a, %b
  %s.1 = fadd float %s, %c
  %s.2 = fadd float %s.1, %d
  %s.3 = fadd float %s.2, %e
  %s.4 = fadd float %s.3, %f
  %s.5 = fadd float %s.4, %g
  %s.6 = fadd float %s.5, %h
  %s.7 = fadd float %s.6, %i
  %s.8 = fadd float %s.7, %j
  %s.9 = fadd float %s.8, %k
  %s.10 = fadd float %s.9, %l
  ret float %s.10
}
979
980; GCN-LABEL: {{^}}icmp64:
981; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v
982; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v
; i64 icmp ballot feeding bit arithmetic: the expected codegen keeps the
; underlying lane compare (v_cmp_eq_u32) rather than materializing the full
; 64-bit mask, on both wave sizes.
define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %mul4 = mul nsw i32 %s, %n
  ; NOTE(review): %cmp is unused — presumably left over from test reduction.
  %cmp = icmp slt i32 0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem = urem i32 %id, %s
  %icmp = tail call i64 @llvm.amdgcn.icmp.i64.i32(i32 %rem, i32 0, i32 32)
  ; Shift/mask/or pipeline forces the high bit set so cttz has a known upper
  ; bound; the i64 width is what distinguishes this test from icmp32.
  %shr = lshr i64 %icmp, 1
  %notmask = shl nsw i64 -1, 0
  %and = and i64 %notmask, %shr
  %or = or i64 %and, -9223372036854775808
  %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true)
  %cast = trunc i64 %cttz to i32
  %cmp3 = icmp ugt i32 10, %cast
  %cmp6 = icmp ne i32 %rem, 0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}
1010
1011; GCN-LABEL: {{^}}fcmp64:
1012; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v
1013; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v
; i64 fcmp ballot (cond code 1 = oeq) feeding the same bit pipeline as
; icmp64: expects the lane compare v_cmp_eq_f32 on both wave sizes.
define amdgpu_kernel void @fcmp64(float %n, float %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %id.f = uitofp i32 %id to float
  %mul4 = fmul float %s, %n
  ; NOTE(review): %cmp is unused — presumably left over from test reduction.
  %cmp = fcmp ult float 0.0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem.f = frem float %id.f, %s
  %fcmp = tail call i64 @llvm.amdgcn.fcmp.i64.f32(float %rem.f, float 0.0, i32 1)
  %shr = lshr i64 %fcmp, 1
  %notmask = shl nsw i64 -1, 0
  %and = and i64 %notmask, %shr
  %or = or i64 %and, -9223372036854775808
  %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true)
  %cast = trunc i64 %cttz to i32
  %cmp3 = icmp ugt i32 10, %cast
  %cmp6 = fcmp one float %rem.f, 0.0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}
1042
1043; GCN-LABEL: {{^}}icmp32:
1044; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v
1045; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v
; i32 variant of icmp64: the ballot result and all the follow-on bit
; arithmetic are 32-bit, but the lane compare checked for is the same
; v_cmp_eq_u32 on both wave sizes.
define amdgpu_kernel void @icmp32(i32 %n, i32 %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %mul4 = mul nsw i32 %s, %n
  ; NOTE(review): %cmp is unused — presumably left over from test reduction.
  %cmp = icmp slt i32 0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem = urem i32 %id, %s
  %icmp = tail call i32 @llvm.amdgcn.icmp.i32.i32(i32 %rem, i32 0, i32 32)
  %shr = lshr i32 %icmp, 1
  %notmask = shl nsw i32 -1, 0
  %and = and i32 %notmask, %shr
  ; 2147483648 = 0x80000000: forces the top bit so cttz is bounded.
  %or = or i32 %and, 2147483648
  %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true)
  %cmp3 = icmp ugt i32 10, %cttz
  %cmp6 = icmp ne i32 %rem, 0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}
1072
1073; GCN-LABEL: {{^}}fcmp32:
1074; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v
1075; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v
; i32 variant of fcmp64 (cond code 1 = oeq): 32-bit ballot result, same
; expected lane compare v_cmp_eq_f32 on both wave sizes.
define amdgpu_kernel void @fcmp32(float %n, float %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %id.f = uitofp i32 %id to float
  %mul4 = fmul float %s, %n
  ; NOTE(review): %cmp is unused — presumably left over from test reduction.
  %cmp = fcmp ult float 0.0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem.f = frem float %id.f, %s
  %fcmp = tail call i32 @llvm.amdgcn.fcmp.i32.f32(float %rem.f, float 0.0, i32 1)
  %shr = lshr i32 %fcmp, 1
  %notmask = shl nsw i32 -1, 0
  %and = and i32 %notmask, %shr
  ; 2147483648 = 0x80000000: forces the top bit so cttz is bounded.
  %or = or i32 %and, 2147483648
  %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true)
  %cmp3 = icmp ugt i32 10, %cttz
  %cmp6 = fcmp one float %rem.f, 0.0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}
1103
; External callee invoked by callee_no_stack_with_call; the call forces
; save/restore of the VGPR used for SGPR spilling.
declare void @external_void_func_void() #1
1105
1106; Test save/restore of VGPR needed for SGPR spilling.
1107
1108; GCN-LABEL: {{^}}callee_no_stack_with_call:
1109; GCN: s_waitcnt
1110; GCN-NEXT: s_waitcnt_vscnt
1111
1112; GFX1064-NEXT: s_or_saveexec_b64 [[COPY_EXEC0:s\[[0-9]+:[0-9]+\]]], -1{{$}}
1113; GFX1032-NEXT: s_or_saveexec_b32 [[COPY_EXEC0:s[0-9]+]], -1{{$}}
1114; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
1115; GCN-NEXT: s_waitcnt_depctr 0xffe3
1116; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC0]]
1117; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC0]]
1118
1119; GCN-NEXT: v_writelane_b32 v40, s33, 2
1120; GCN: s_mov_b32 s33, s32
1121; GFX1064: s_addk_i32 s32, 0x400
1122; GFX1032: s_addk_i32 s32, 0x200
1123
1124
1125; GCN-DAG: v_writelane_b32 v40, s30, 0
1126; GCN-DAG: v_writelane_b32 v40, s31, 1
1127; GCN: s_swappc_b64
1128; GCN-DAG: v_readlane_b32 s30, v40, 0
1129; GCN-DAG: v_readlane_b32 s31, v40, 1
1130
1131
1132; GFX1064: s_addk_i32 s32, 0xfc00
1133; GFX1032: s_addk_i32 s32, 0xfe00
1134; GCN: v_readlane_b32 s33, v40, 2
1135; GFX1064: s_or_saveexec_b64 [[COPY_EXEC1:s\[[0-9]+:[0-9]+\]]], -1{{$}}
1136; GFX1032: s_or_saveexec_b32 [[COPY_EXEC1:s[0-9]]], -1{{$}}
1137; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
1138; GCN-NEXT: s_waitcnt_depctr 0xffe3
1139; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
1140; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC1]]
1141; GCN-NEXT: s_waitcnt vmcnt(0)
1142; GCN-NEXT: s_setpc_b64
; Non-kernel function with no stack usage of its own that makes a call: the
; check lines above verify the whole-wave (s_or_saveexec -1) spill/reload of
; v40 around the body, the s30/s31 return-address writelane/readlane, and the
; wave-size-dependent s32 stack adjustments.
define void @callee_no_stack_with_call() #1 {
  call void @external_void_func_void()
  ret void
}
1147
1148
; --- Intrinsic declarations used by the tests in this file ---
declare i32 @llvm.amdgcn.workitem.id.x()
declare float @llvm.fabs.f32(float)
declare { float, i1 } @llvm.amdgcn.div.scale.f32(float, float, i1)
declare { double, i1 } @llvm.amdgcn.div.scale.f64(double, double, i1)
declare float @llvm.amdgcn.div.fmas.f32(float, float, float, i1)
declare double @llvm.amdgcn.div.fmas.f64(double, double, double, i1)
declare i1 @llvm.amdgcn.class.f32(float, i32)
declare i32 @llvm.amdgcn.set.inactive.i32(i32, i32)
declare i64 @llvm.amdgcn.set.inactive.i64(i64, i64)
declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32)
declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32)
; llvm.amdgcn.wwm is the deprecated spelling of llvm.amdgcn.strict.wwm; both
; are exercised above.
declare float @llvm.amdgcn.strict.wwm.f32(float)
declare float @llvm.amdgcn.wwm.f32(float)
declare i32 @llvm.amdgcn.wqm.i32(i32)
declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32)
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32)
declare float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32>, i32, i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
declare i64 @llvm.amdgcn.fcmp.i64.f32(float, float, i32)
declare i64 @llvm.amdgcn.icmp.i64.i32(i32, i32, i32)
declare i32 @llvm.amdgcn.fcmp.i32.f32(float, float, i32)
declare i32 @llvm.amdgcn.icmp.i32.i32(i32, i32, i32)
declare void @llvm.amdgcn.kill(i1)
declare i1 @llvm.amdgcn.wqm.vote(i1)
declare i1 @llvm.amdgcn.ps.live()
declare i64 @llvm.cttz.i64(i64, i1)
declare i32 @llvm.cttz.i32(i32, i1)
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #5

; --- Attribute groups ---
; #2 disables optimization (optnone test_branch_true); #3/#4 override the
; wave size per-function for the VGPRBlocks tests.
attributes #0 = { nounwind readnone speculatable }
attributes #1 = { nounwind }
attributes #2 = { nounwind readnone optnone noinline }
attributes #3 = { "target-features"="+wavefrontsize32" }
attributes #4 = { "target-features"="+wavefrontsize64" }
attributes #5 = { inaccessiblememonly nounwind }
1185