; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1064 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -mattr=-wavefrontsize32,+wavefrontsize64 -amdgpu-early-ifcvt=1 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1064 %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefixes=GCN,GFX1032,GFX10DEFWAVE %s

; GCN-LABEL: {{^}}test_vopc_i32:
; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc_lo
; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, vcc{{$}}
define amdgpu_kernel void @test_vopc_i32(i32 addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
  %load = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp sgt i32 %load, 0
  %sel = select i1 %cmp, i32 1, i32 2
  store i32 %sel, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vopc_f32:
; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc_lo
; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, vcc{{$}}
define amdgpu_kernel void @test_vopc_f32(float addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid
  %load = load float, float addrspace(1)* %gep, align 4
  %cmp = fcmp ugt float %load, 0.0
  %sel = select i1 %cmp, float 1.0, float 2.0
  store float %sel, float addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vopc_vcmp:
; GFX1032: v_cmp_nle_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1064: v_cmp_nle_f32_e32 vcc, 0, v{{[0-9]+}}
define amdgpu_ps void @test_vopc_vcmp(float %x) {
  %cmp = fcmp oge float %x, 0.0
  call void @llvm.amdgcn.kill(i1 %cmp)
  ret void
}

; GCN-LABEL: {{^}}test_vopc_2xf16:
; GFX1032: v_cmp_le_f16_sdwa [[SC:vcc_lo|s[0-9]+]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]]
; GFX1064: v_cmp_le_f16_sdwa [[SC:vcc|s\[[0-9:]+\]]], {{[vs][0-9]+}}, v{{[0-9]+}} src0_sel:WORD_1 src1_sel:DWORD
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c003c00, v{{[0-9]+}}, [[SC]]
define amdgpu_kernel void @test_vopc_2xf16(<2 x half> addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds <2 x half>, <2 x half> addrspace(1)* %arg, i32 %lid
  %load = load <2 x half>, <2 x half> addrspace(1)* %gep, align 4
  %elt = extractelement <2 x half> %load, i32 1
  %cmp = fcmp ugt half %elt, 0.0
  %sel = select i1 %cmp, <2 x half> <half 1.0, half 1.0>, <2 x half> %load
  store <2 x half> %sel, <2 x half> addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vopc_class:
; GFX1032: v_cmp_class_f32_e64 [[C:vcc_lo|s[0-9:]+]], s{{[0-9]+}}, 0x204
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]
; GFX1064: v_cmp_class_f32_e64 [[C:vcc|s\[[0-9:]+\]]], s{{[0-9]+}}, 0x204
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]{{$}}
define amdgpu_kernel void @test_vopc_class(i32 addrspace(1)* %out, float %x) #0 {
  %fabs = tail call float @llvm.fabs.f32(float %x)
  %cmp = fcmp oeq float %fabs, 0x7FF0000000000000
  %ext = zext i1 %cmp to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vcmp_vcnd_f16:
; GFX1032: v_cmp_neq_f16_e64 [[C:vcc_lo|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]

; GFX1064: v_cmp_neq_f16_e64 [[C:vcc|s\[[0-9:]+\]]], 0x7c00, s{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 0x3c00, v{{[0-9]+}}, [[C]]{{$}}
define amdgpu_kernel void @test_vcmp_vcnd_f16(half addrspace(1)* %out, half %x) #0 {
  %cmp = fcmp oeq half %x, 0x7FF0000000000000
  %sel = select i1 %cmp, half 1.0, half %x
  store half %sel, half addrspace(1)* %out, align 2
  ret void
}

; GCN-LABEL: {{^}}test_vop3_cmp_f32_sop_and:
; GFX1032: v_cmp_nge_f32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cmp_nle_f32_e64 [[C2:s[0-9]+]], 1.0, v{{[0-9]+}}
; GFX1032: s_and_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]]
; GFX1064: v_cmp_nge_f32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cmp_nle_f32_e64 [[C2:s\[[0-9:]+\]]], 1.0, v{{[0-9]+}}
; GFX1064: s_and_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2.0, 1.0, [[AND]]
define amdgpu_kernel void @test_vop3_cmp_f32_sop_and(float addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds float, float addrspace(1)* %arg, i32 %lid
  %load = load float, float addrspace(1)* %gep, align 4
  %cmp = fcmp ugt float %load, 0.0
  %cmp2 = fcmp ult float %load, 1.0
  %and = and i1 %cmp, %cmp2
  %sel = select i1 %and, float 1.0, float 2.0
  store float %sel, float addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vop3_cmp_i32_sop_xor:
; GFX1032: v_cmp_lt_i32_e32 vcc_lo, 0, v{{[0-9]+}}
; GFX1032: v_cmp_gt_i32_e64 [[C2:s[0-9]+]], 1, v{{[0-9]+}}
; GFX1032: s_xor_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
; GFX1064: v_cmp_lt_i32_e32 vcc, 0, v{{[0-9]+}}
; GFX1064: v_cmp_gt_i32_e64 [[C2:s\[[0-9:]+\]]], 1, v{{[0-9]+}}
; GFX1064: s_xor_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
define amdgpu_kernel void @test_vop3_cmp_i32_sop_xor(i32 addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
  %load = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp sgt i32 %load, 0
  %cmp2 = icmp slt i32 %load, 1
  %xor = xor i1 %cmp, %cmp2
  %sel = select i1 %xor, i32 1, i32 2
  store i32 %sel, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_vop3_cmp_u32_sop_or:
; GFX1032: v_cmp_lt_u32_e32 vcc_lo, 3, v{{[0-9]+}}
; GFX1032: v_cmp_gt_u32_e64 [[C2:s[0-9]+]], 2, v{{[0-9]+}}
; GFX1032: s_or_b32 [[AND:s[0-9]+]], vcc_lo, [[C2]]
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
; GFX1064: v_cmp_lt_u32_e32 vcc, 3, v{{[0-9]+}}
; GFX1064: v_cmp_gt_u32_e64 [[C2:s\[[0-9:]+\]]], 2, v{{[0-9]+}}
; GFX1064: s_or_b64 [[AND:s\[[0-9:]+\]]], vcc, [[C2]]
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 2, 1, [[AND]]
define amdgpu_kernel void @test_vop3_cmp_u32_sop_or(i32 addrspace(1)* %arg) {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %lid
  %load = load i32, i32 addrspace(1)* %gep, align 4
  %cmp = icmp ugt i32 %load, 3
  %cmp2 = icmp ult i32 %load, 2
  %or = or i1 %cmp, %cmp2
  %sel = select i1 %or, i32 1, i32 2
  store i32 %sel, i32 addrspace(1)* %gep, align 4
  ret void
}

; GCN-LABEL: {{^}}test_mask_if:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN: s_cbranch_execz
define amdgpu_kernel void @test_mask_if(i32 addrspace(1)* %arg) #0 {
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %cmp = icmp ugt i32 %lid, 10
  br i1 %cmp, label %if, label %endif

if:
  store i32 0, i32 addrspace(1)* %arg, align 4
  br label %endif

endif:
  ret void
}

; GCN-LABEL: {{^}}test_loop_with_if:
; GFX1032: s_or_b32 s{{[0-9]+}}, vcc_lo, s{{[0-9]+}}
; GFX1032: s_andn2_b32 exec_lo, exec_lo, s{{[0-9]+}}
; GFX1064: s_or_b64 s[{{[0-9:]+}}], vcc, s[{{[0-9:]+}}]
; GFX1064: s_andn2_b64 exec, exec, s[{{[0-9:]+}}]
; GCN:     s_cbranch_execz
; GCN:   .LBB{{.*}}:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN:     s_cbranch_execz
; GCN:   ; %bb.{{[0-9]+}}:
; GCN:   .LBB{{.*}}:
; GFX1032: s_xor_b32 s{{[0-9]+}}, exec_lo, s{{[0-9]+}}
; GFX1064: s_xor_b64 s[{{[0-9:]+}}], exec, s[{{[0-9:]+}}]
; GCN:   ; %bb.{{[0-9]+}}:
; GCN:   ; %bb.{{[0-9]+}}:
; GFX1032: s_or_b32 exec_lo, exec_lo, s{{[0-9]+}}
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: s_or_b64 exec, exec, s[{{[0-9:]+}}]
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}]{{$}}
; GCN:     s_cbranch_execz .LBB
; GCN:   ; %bb.{{[0-9]+}}:
; GCN:   .LBB{{.*}}:
; GCN:     s_endpgm
define amdgpu_kernel void @test_loop_with_if(i32 addrspace(1)* %arg) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  br label %bb2

bb1:
  ret void

bb2:
  %tmp3 = phi i32 [ 0, %bb ], [ %tmp15, %bb13 ]
  %tmp4 = icmp slt i32 %tmp3, %tmp
  br i1 %tmp4, label %bb5, label %bb11

bb5:
  %tmp6 = sext i32 %tmp3 to i64
  %tmp7 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp6
  %tmp8 = load i32, i32 addrspace(1)* %tmp7, align 4
  %tmp9 = icmp sgt i32 %tmp8, 10
  br i1 %tmp9, label %bb10, label %bb11

bb10:
  store i32 %tmp, i32 addrspace(1)* %tmp7, align 4
  br label %bb13

bb11:
  %tmp12 = sdiv i32 %tmp3, 2
  br label %bb13

bb13:
  %tmp14 = phi i32 [ %tmp3, %bb10 ], [ %tmp12, %bb11 ]
  %tmp15 = add nsw i32 %tmp14, 1
  %tmp16 = icmp slt i32 %tmp14, 255
  br i1 %tmp16, label %bb2, label %bb1
}

; GCN-LABEL: {{^}}test_loop_with_if_else_break:
; GFX1032: s_and_saveexec_b32 s{{[0-9]+}}, vcc_lo
; GFX1064: s_and_saveexec_b64 s[{{[0-9:]+}}], vcc{{$}}
; GCN:     s_cbranch_execz
; GCN:   ; %bb.{{[0-9]+}}: ; %.preheader
; GCN:   .LBB{{.*}}:

; GCN:     global_store_dword
; GFX1032: s_or_b32 [[MASK0:s[0-9]+]], [[MASK0]], vcc_lo
; GFX1064: s_or_b64 [[MASK0:s\[[0-9:]+\]]], [[MASK0]], vcc
; GFX1032: s_andn2_b32 [[MASK1:s[0-9]+]], [[MASK1]], exec_lo
; GFX1064: s_andn2_b64 [[MASK1:s\[[0-9:]+\]]], [[MASK1]], exec
; GFX1032: s_and_b32 [[MASK0]], [[MASK0]], exec_lo
; GFX1064: s_and_b64 [[MASK0]], [[MASK0]], exec
; GFX1032: s_or_b32 [[MASK1]], [[MASK1]], [[MASK0]]
; GFX1064: s_or_b64 [[MASK1]], [[MASK1]], [[MASK0]]
; GCN:   .LBB{{.*}}: ; %Flow
; GFX1032: s_and_b32 [[TMP0:s[0-9]+]], exec_lo, [[MASK1]]
; GFX1064: s_and_b64 [[TMP0:s\[[0-9:]+\]]], exec, [[MASK1]]
; GFX1032: s_or_b32  [[ACC:s[0-9]+]], [[TMP0]], [[ACC]]
; GFX1064: s_or_b64  [[ACC:s\[[0-9:]+\]]], [[TMP0]], [[ACC]]
; GFX1032: s_andn2_b32 exec_lo, exec_lo, [[ACC]]
; GFX1064: s_andn2_b64 exec, exec, [[ACC]]
; GCN:     s_cbranch_execz
; GCN:   .LBB{{.*}}:

; GFX1032-DAG: s_or_b32 [[MASK1]], [[MASK1]], exec_lo
; GFX1064-DAG: s_or_b64 [[MASK1]], [[MASK1]], exec
; GCN-DAG: global_load_dword [[LOAD:v[0-9]+]]
; GFX1032: v_cmp_gt_i32_e32 vcc_lo, 11, [[LOAD]]
; GFX1064: v_cmp_gt_i32_e32 vcc, 11, [[LOAD]]
define amdgpu_kernel void @test_loop_with_if_else_break(i32 addrspace(1)* %arg) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp1 = icmp eq i32 %tmp, 0
  br i1 %tmp1, label %.loopexit, label %.preheader

.preheader:
  br label %bb2

bb2:
  %tmp3 = phi i32 [ %tmp9, %bb8 ], [ 0, %.preheader ]
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = getelementptr inbounds i32, i32 addrspace(1)* %arg, i64 %tmp4
  %tmp6 = load i32, i32 addrspace(1)* %tmp5, align 4
  %tmp7 = icmp sgt i32 %tmp6, 10
  br i1 %tmp7, label %bb8, label %.loopexit

bb8:
  store i32 %tmp, i32 addrspace(1)* %tmp5, align 4
  %tmp9 = add nuw nsw i32 %tmp3, 1
  %tmp10 = icmp ult i32 %tmp9, 256
  %tmp11 = icmp ult i32 %tmp9, %tmp
  %tmp12 = and i1 %tmp10, %tmp11
  br i1 %tmp12, label %bb2, label %.loopexit

.loopexit:
  ret void
}

; GCN-LABEL: {{^}}test_addc_vop2b:
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, s{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}, vcc_lo
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, v{{[0-9]+}}, vcc{{$}}
define amdgpu_kernel void @test_addc_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
  %tmp5 = add nsw i64 %tmp4, %arg1
  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
  ret void
}

; GCN-LABEL: {{^}}test_subbrev_vop2b:
; GFX1032: v_sub_co_u32 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], v{{[0-9]+}}, s{{[0-9]+}}{{$}}
; GFX1032: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}}
; GFX1064: v_sub_co_u32 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], v{{[0-9]+}}, s{{[0-9]+}}{{$}}
; GFX1064: v_subrev_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, {{[vs][0-9]+}}, [[A0]]{{$}}
define amdgpu_kernel void @test_subbrev_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
  %tmp5 = sub nsw i64 %tmp4, %arg1
  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
  ret void
}

; GCN-LABEL: {{^}}test_subb_vop2b:
; GFX1032: v_sub_co_u32 v{{[0-9]+}}, [[A0:s[0-9]+|vcc_lo]], s{{[0-9]+}}, v{{[0-9]+}}{{$}}
; GFX1032: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}}
; GFX1064: v_sub_co_u32 v{{[0-9]+}}, [[A0:s\[[0-9:]+\]|vcc]], s{{[0-9]+}}, v{{[0-9]+}}{{$}}
; GFX1064: v_sub_co_ci_u32_e32 v{{[0-9]+}}, vcc, {{[vs][0-9]+}}, v{{[0-9]+}}, [[A0]]{{$}}
define amdgpu_kernel void @test_subb_vop2b(i64 addrspace(1)* %arg, i64 %arg1) #0 {
bb:
  %tmp = tail call i32 @llvm.amdgcn.workitem.id.x()
  %tmp3 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i32 %tmp
  %tmp4 = load i64, i64 addrspace(1)* %tmp3, align 8
  %tmp5 = sub nsw i64 %arg1, %tmp4
  store i64 %tmp5, i64 addrspace(1)* %tmp3, align 8
  ret void
}

; GCN-LABEL: {{^}}test_udiv64:
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}, vcc_lo
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, 0, v{{[0-9]+}}, vcc_lo
; GFX1032: v_add_co_u32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}, vcc_lo
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}, vcc{{$}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc{{$}}
; GFX1064: v_add_co_u32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_add_co_ci_u32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}}, vcc{{$}}
define amdgpu_kernel void @test_udiv64(i64 addrspace(1)* %arg) #0 {
bb:
  %tmp = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 1
  %tmp1 = load i64, i64 addrspace(1)* %tmp, align 8
  %tmp2 = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = udiv i64 %tmp1, %tmp2
  %tmp4 = getelementptr inbounds i64, i64 addrspace(1)* %arg, i64 2
  store i64 %tmp3, i64 addrspace(1)* %tmp4, align 8
  ret void
}

; GCN-LABEL: {{^}}test_div_scale_f32:
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @test_div_scale_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false) nounwind readnone
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}test_div_scale_f64:
; GFX1032: v_div_scale_f64 v[{{[0-9:]+}}], s{{[0-9]+}}, v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
; GFX1064: v_div_scale_f64 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
define amdgpu_kernel void @test_div_scale_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1

  %a = load volatile double, double addrspace(1)* %gep.0, align 8
  %b = load volatile double, double addrspace(1)* %gep.1, align 8

  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true) nounwind readnone
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}test_mad_i64_i32:
; GFX1032: v_mad_i64_i32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
; GFX1064: v_mad_i64_i32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
define i64 @test_mad_i64_i32(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
  %sext0 = sext i32 %arg0 to i64
  %sext1 = sext i32 %arg1 to i64
  %mul = mul i64 %sext0, %sext1
  %mad = add i64 %mul, %arg2
  ret i64 %mad
}

; GCN-LABEL: {{^}}test_mad_u64_u32:
; GFX1032: v_mad_u64_u32 v[{{[0-9:]+}}], s{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
; GFX1064: v_mad_u64_u32 v[{{[0-9:]+}}], s[{{[0-9:]+}}], v{{[0-9]+}}, v{{[0-9]+}}, v[{{[0-9:]+}}]
define i64 @test_mad_u64_u32(i32 %arg0, i32 %arg1, i64 %arg2) #0 {
  %sext0 = zext i32 %arg0 to i64
  %sext1 = zext i32 %arg1 to i64
  %mul = mul i64 %sext0, %sext1
  %mad = add i64 %mul, %arg2
  ret i64 %mad
}

; GCN-LABEL: {{^}}test_div_fmas_f32:
; GCN:     s_bitcmp1_b32 s{{[0-9]+}}, 0
; GFX1032: s_cselect_b32 vcc_lo, -1, 0
; GFX1064: s_cselect_b64 vcc, -1, 0
; GCN:     v_div_fmas_f32 v{{[0-9]+}}, {{[vs][0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @test_div_fmas_f32(float addrspace(1)* %out, float %a, float %b, float %c, i1 %d) nounwind {
  %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %d) nounwind readnone
  store float %result, float addrspace(1)* %out, align 4
  ret void
}

; GCN-LABEL: {{^}}test_div_fmas_f64:
; GCN:     s_bitcmp1_b32 s{{[0-9]+}}, 0
; GFX1032: s_cselect_b32 vcc_lo, -1, 0
; GFX1064: s_cselect_b64 vcc, -1, 0
; GCN-DAG: v_div_fmas_f64 v[{{[0-9:]+}}], {{[vs]}}[{{[0-9:]+}}], v[{{[0-9:]+}}], v[{{[0-9:]+}}]
define amdgpu_kernel void @test_div_fmas_f64(double addrspace(1)* %out, double %a, double %b, double %c, i1 %d) nounwind {
  %result = call double @llvm.amdgcn.div.fmas.f64(double %a, double %b, double %c, i1 %d) nounwind readnone
  store double %result, double addrspace(1)* %out, align 8
  ret void
}

; GCN-LABEL: {{^}}test_div_fmas_f32_i1_phi_vcc:
; GFX1032: s_mov_b32 [[VCC:vcc_lo]], 0{{$}}
; GFX1064: s_mov_b64 [[VCC:vcc]], 0{{$}}
; GFX1032: s_and_saveexec_b32 [[SAVE:s[0-9]+]], s{{[0-9]+}}{{$}}
; GFX1064: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], s[{{[0-9:]+}}]{{$}}

; GCN: load_dword [[LOAD:v[0-9]+]]
; GCN: v_cmp_ne_u32_e32 [[VCC]], 0, [[LOAD]]

; GCN: .LBB{{[0-9_]+}}:
; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE]]
; GFX1064: s_or_b64 exec, exec, [[SAVE]]
; GCN: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
define amdgpu_kernel void @test_div_fmas_f32_i1_phi_vcc(float addrspace(1)* %out, float addrspace(1)* %in, i32 addrspace(1)* %dummy) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
  %gep.out = getelementptr float, float addrspace(1)* %out, i32 2
  %gep.a = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.b = getelementptr float, float addrspace(1)* %gep.a, i32 1
  %gep.c = getelementptr float, float addrspace(1)* %gep.a, i32 2

  %a = load float, float addrspace(1)* %gep.a
  %b = load float, float addrspace(1)* %gep.b
  %c = load float, float addrspace(1)* %gep.c

  %cmp0 = icmp eq i32 %tid, 0
  br i1 %cmp0, label %bb, label %exit

bb:
  %val = load volatile i32, i32 addrspace(1)* %dummy
  %cmp1 = icmp ne i32 %val, 0
  br label %exit

exit:
  %cond = phi i1 [false, %entry], [%cmp1, %bb]
  %result = call float @llvm.amdgcn.div.fmas.f32(float %a, float %b, float %c, i1 %cond) nounwind readnone
  store float %result, float addrspace(1)* %gep.out, align 4
  ret void
}

; GCN-LABEL: {{^}}fdiv_f32:
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GCN: v_rcp_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; GFX1032: v_div_scale_f32 v{{[0-9]+}}, vcc_lo, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: v_div_scale_f32 v{{[0-9]+}}, vcc, s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}

; GCN-NOT: vcc
; GCN: v_div_fmas_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @fdiv_f32(float addrspace(1)* %out, float %a, float %b) #0 {
entry:
  %fdiv = fdiv float %a, %b
  store float %fdiv, float addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_br_cc_f16:
; GFX1032:  v_cmp_nlt_f16_e32 vcc_lo,
; GFX1064:  v_cmp_nlt_f16_e32 vcc,
; GCN-NEXT: s_cbranch_vccnz
define amdgpu_kernel void @test_br_cc_f16(
    half addrspace(1)* %r,
    half addrspace(1)* %a,
    half addrspace(1)* %b) {
entry:
  %a.val = load half, half addrspace(1)* %a
  %b.val = load half, half addrspace(1)* %b
  %fcmp = fcmp olt half %a.val, %b.val
  br i1 %fcmp, label %one, label %two

one:
  store half %a.val, half addrspace(1)* %r
  ret void

two:
  store half %b.val, half addrspace(1)* %r
  ret void
}

; GCN-LABEL: {{^}}test_brcc_i1:
; GCN:      s_bitcmp0_b32 s{{[0-9]+}}, 0
; GCN-NEXT: s_cbranch_scc1
define amdgpu_kernel void @test_brcc_i1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %in, i1 %val) #0 {
  %cmp0 = icmp ne i1 %val, 0
  br i1 %cmp0, label %store, label %end

store:
  store i32 222, i32 addrspace(1)* %out
  ret void

end:
  ret void
}

; GCN-LABEL: {{^}}test_preserve_condition_undef_flag:
; GFX1032-DAG: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0
; GFX1032-DAG: v_cmp_ngt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 0
; GFX1032: v_cmp_nlt_f32_e64 s{{[0-9]+}}, s{{[0-9]+}}, 1.0
; GFX1032: s_or_b32 [[OR1:s[0-9]+]], s{{[0-9]+}}, s{{[0-9]+}}
; GFX1032: s_or_b32 [[OR2:s[0-9]+]], [[OR1]], s{{[0-9]+}}
; GFX1032: s_and_b32 vcc_lo, exec_lo, [[OR2]]
; GFX1064-DAG: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0
; GFX1064-DAG: v_cmp_ngt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 0
; GFX1064: v_cmp_nlt_f32_e64 s[{{[0-9:]+}}], s{{[0-9]+}}, 1.0
; GFX1064: s_or_b64 [[OR1:s\[[0-9:]+\]]], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
; GFX1064: s_or_b64 [[OR2:s\[[0-9:]+\]]], [[OR1]], s[{{[0-9:]+}}]
; GFX1064: s_and_b64 vcc, exec, [[OR2]]
; GCN:     s_cbranch_vccnz
define amdgpu_kernel void @test_preserve_condition_undef_flag(float %arg, i32 %arg1, float %arg2) #0 {
bb0:
  %tmp = icmp sgt i32 %arg1, 4
  %undef = call i1 @llvm.amdgcn.class.f32(float undef, i32 undef)
  %tmp4 = select i1 %undef, float %arg, float 1.000000e+00
  %tmp5 = fcmp ogt float %arg2, 0.000000e+00
  %tmp6 = fcmp olt float %arg2, 1.000000e+00
  %tmp7 = fcmp olt float %arg, %tmp4
  %tmp8 = and i1 %tmp5, %tmp6
  %tmp9 = and i1 %tmp8, %tmp7
  br i1 %tmp9, label %bb1, label %bb2

bb1:
  store volatile i32 0, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; GCN-LABEL: {{^}}test_invert_true_phi_cond_break_loop:
; GFX1032: s_xor_b32 s{{[0-9]+}}, s{{[0-9]+}}, -1
; GFX1032: s_or_b32 s{{[0-9]+}}, s{{[0-9]+}}, s{{[0-9]+}}
; GFX1064: s_xor_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], -1
; GFX1064: s_or_b64 s[{{[0-9:]+}}], s[{{[0-9:]+}}], s[{{[0-9:]+}}]
define amdgpu_kernel void @test_invert_true_phi_cond_break_loop(i32 %arg) #0 {
bb:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %tmp = sub i32 %id, %arg
  br label %bb1

bb1:                                              ; preds = %Flow, %bb
  %lsr.iv = phi i32 [ undef, %bb ], [ %tmp2, %Flow ]
  %lsr.iv.next = add i32 %lsr.iv, 1
  %cmp0 = icmp slt i32 %lsr.iv.next, 0
  br i1 %cmp0, label %bb4, label %Flow

bb4:                                              ; preds = %bb1
  %load = load volatile i32, i32 addrspace(1)* undef, align 4
  %cmp1 = icmp sge i32 %tmp, %load
  br label %Flow

Flow:                                             ; preds = %bb4, %bb1
  %tmp2 = phi i32 [ %lsr.iv.next, %bb4 ], [ undef, %bb1 ]
  %tmp3 = phi i1 [ %cmp1, %bb4 ], [ true, %bb1 ]
  br i1 %tmp3, label %bb1, label %bb9

bb9:                                              ; preds = %Flow
  store volatile i32 7, i32 addrspace(3)* undef
  ret void
}

; GCN-LABEL: {{^}}test_movrels_extract_neg_offset_vgpr:
; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 1, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc_lo
; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 2, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc_lo
; GFX1032: v_cmp_ne_u32_e32 vcc_lo, 3, v{{[0-9]+}}
; GFX1032: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc_lo
; GFX1064: v_cmp_eq_u32_e32 vcc, 1, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
; GFX1064: v_cmp_ne_u32_e32 vcc, 2, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 2, v{{[0-9]+}}, vcc
; GFX1064: v_cmp_ne_u32_e32 vcc, 3, v{{[0-9]+}}
; GFX1064: v_cndmask_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}, vcc
define amdgpu_kernel void @test_movrels_extract_neg_offset_vgpr(i32 addrspace(1)* %out) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_set_inactive:
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 42
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1064: s_not_b64 exec, exec{{$}}
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 42
; GFX1064: s_not_b64 exec, exec{{$}}
define amdgpu_kernel void @test_set_inactive(i32 addrspace(1)* %out, i32 %in) #0 {
  %tmp = call i32 @llvm.amdgcn.set.inactive.i32(i32 %in, i32 42)
  store i32 %tmp, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_set_inactive_64:
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1032: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1032: s_not_b32 exec_lo, exec_lo
; GFX1064: s_not_b64 exec, exec{{$}}
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1064: v_mov_b32_e32 {{v[0-9]+}}, 0
; GFX1064: s_not_b64 exec, exec{{$}}
define amdgpu_kernel void @test_set_inactive_64(i64 addrspace(1)* %out, i64 %in) #0 {
  %tmp = call i64 @llvm.amdgcn.set.inactive.i64(i64 %in, i64 0)
  store i64 %tmp, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_kill_i1_terminator_float:
; GFX1032: s_mov_b32 exec_lo, 0
; GFX1064: s_mov_b64 exec, 0
define amdgpu_ps void @test_kill_i1_terminator_float() #0 {
  call void @llvm.amdgcn.kill(i1 false)
  ret void
}

; GCN-LABEL: {{^}}test_kill_i1_terminator_i1:
; GFX1032: s_mov_b32 [[LIVE:s[0-9]+]], exec_lo
; GFX1032: s_or_b32 [[OR:s[0-9]+]],
; GFX1032: s_xor_b32 [[KILL:s[0-9]+]], [[OR]], exec_lo
; GFX1032: s_andn2_b32 [[MASK:s[0-9]+]], [[LIVE]], [[KILL]]
; GFX1032: s_and_b32 exec_lo, exec_lo, [[MASK]]
; GFX1064: s_mov_b64 [[LIVE:s\[[0-9:]+\]]], exec
; GFX1064: s_or_b64 [[OR:s\[[0-9:]+\]]],
; GFX1064: s_xor_b64 [[KILL:s\[[0-9:]+\]]], [[OR]], exec
; GFX1064: s_andn2_b64 [[MASK:s\[[0-9:]+\]]], [[LIVE]], [[KILL]]
; GFX1064: s_and_b64 exec, exec, [[MASK]]
define amdgpu_gs void @test_kill_i1_terminator_i1(i32 %a, i32 %b, i32 %c, i32 %d) #0 {
  %c1 = icmp slt i32 %a, %b
  %c2 = icmp slt i32 %c, %d
  %x = or i1 %c1, %c2
  call void @llvm.amdgcn.kill(i1 %x)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0, i1 false, i1 false)
  ret void
}

; GCN-LABEL: {{^}}test_loop_vcc:
; GFX1032: v_cmp_lt_f32_e32 vcc_lo,
; GFX1064: v_cmp_lt_f32_e32 vcc,
; GCN: s_cbranch_vccz
define amdgpu_ps <4 x float> @test_loop_vcc(<4 x float> %in) #0 {
entry:
  br label %loop

loop:
  %ctr.iv = phi float [ 0.0, %entry ], [ %ctr.next, %body ]
  %c.iv = phi <4 x float> [ %in, %entry ], [ %c.next, %body ]
  %cc = fcmp ogt float %ctr.iv, 7.0
  br i1 %cc, label %break, label %body

body:
  %c.iv0 = extractelement <4 x float> %c.iv, i32 0
  %c.next = call <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32 15, float %c.iv0, <8 x i32> undef, <4 x i32> undef, i1 0, i32 0, i32 0)
  %ctr.next = fadd float %ctr.iv, 2.0
  br label %loop

break:
  ret <4 x float> %c.iv
}

; NOTE: llvm.amdgcn.wwm is deprecated, use llvm.amdgcn.strict.wwm instead.
; GCN-LABEL: {{^}}test_wwm1:
; GFX1032: s_or_saveexec_b32 [[SAVE:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE]]
; GFX1064: s_or_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE]]
define amdgpu_ps float @test_wwm1(i32 inreg %idx0, i32 inreg %idx1, float %src0, float %src1) {
main_body:
  %out = fadd float %src0, %src1
  %out.0 = call float @llvm.amdgcn.wwm.f32(float %out)
  ret float %out.0
}

; GCN-LABEL: {{^}}test_wwm2:
; GFX1032: v_cmp_gt_u32_e32 vcc_lo, 16, v{{[0-9]+}}
; GFX1032: s_and_saveexec_b32 [[SAVE1:s[0-9]+]], vcc_lo
; GFX1032: s_or_saveexec_b32 [[SAVE2:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE2]]
; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE1]]
; GFX1064: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
; GFX1064: s_and_saveexec_b64 [[SAVE1:s\[[0-9:]+\]]], vcc{{$}}
; GFX1064: s_or_saveexec_b64 [[SAVE2:s\[[0-9:]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE2]]
; GFX1064: s_or_b64 exec, exec, [[SAVE1]]
define amdgpu_ps float @test_wwm2(i32 inreg %idx) {
main_body:
  ; use mbcnt to make sure the branch is divergent
  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
  %cc = icmp uge i32 %hi, 16
  br i1 %cc, label %endif, label %if

if:
  %src = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i32 0, i32 0)
  %out = fadd float %src, %src
  %out.0 = call float @llvm.amdgcn.wwm.f32(float %out)
  %out.1 = fadd float %src, %out.0
  br label %endif

endif:
  %out.2 = phi float [ %out.1, %if ], [ 0.0, %main_body ]
  ret float %out.2
}

; GCN-LABEL: {{^}}test_strict_wwm1:
; GFX1032: s_or_saveexec_b32 [[SAVE:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE]]
; GFX1064: s_or_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE]]
define amdgpu_ps float @test_strict_wwm1(i32 inreg %idx0, i32 inreg %idx1, float %src0, float %src1) {
main_body:
  %out = fadd float %src0, %src1
  %out.0 = call float @llvm.amdgcn.strict.wwm.f32(float %out)
  ret float %out.0
}

; GCN-LABEL: {{^}}test_strict_wwm2:
; GFX1032: v_cmp_gt_u32_e32 vcc_lo, 16, v{{[0-9]+}}
; GFX1032: s_and_saveexec_b32 [[SAVE1:s[0-9]+]], vcc_lo
; GFX1032: s_or_saveexec_b32 [[SAVE2:s[0-9]+]], -1
; GFX1032: s_mov_b32 exec_lo, [[SAVE2]]
; GFX1032: s_or_b32 exec_lo, exec_lo, [[SAVE1]]
; GFX1064: v_cmp_gt_u32_e32 vcc, 16, v{{[0-9]+}}
; GFX1064: s_and_saveexec_b64 [[SAVE1:s\[[0-9:]+\]]], vcc{{$}}
; GFX1064: s_or_saveexec_b64 [[SAVE2:s\[[0-9:]+\]]], -1
; GFX1064: s_mov_b64 exec, [[SAVE2]]
; GFX1064: s_or_b64 exec, exec, [[SAVE1]]
define amdgpu_ps float @test_strict_wwm2(i32 inreg %idx) {
main_body:
  ; use mbcnt to make sure the branch is divergent
  %lo = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0)
  %hi = call i32 @llvm.amdgcn.mbcnt.hi(i32 -1, i32 %lo)
  %cc = icmp uge i32 %hi, 16
  br i1 %cc, label %endif, label %if

if:
  %src = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx, i32 0, i32 0, i32 0)
  %out = fadd float %src, %src
  %out.0 = call float @llvm.amdgcn.strict.wwm.f32(float %out)
  %out.1 = fadd float %src, %out.0
  br label %endif

endif:
  %out.2 = phi float [ %out.1, %if ], [ 0.0, %main_body ]
  ret float %out.2
}


; GCN-LABEL: {{^}}test_wqm1:
; GFX1032: s_mov_b32 [[ORIG:s[0-9]+]], exec_lo
; GFX1032: s_wqm_b32 exec_lo, exec_lo
; GFX1032: s_and_b32 exec_lo, exec_lo, [[ORIG]]
; GFX1064: s_mov_b64 [[ORIG:s\[[0-9]+:[0-9]+\]]], exec{{$}}
; GFX1064: s_wqm_b64 exec, exec{{$}}
; GFX1064: s_and_b64 exec, exec, [[ORIG]]
define amdgpu_ps <4 x float> @test_wqm1(i32 inreg, i32 inreg, i32 inreg, i32 inreg %m0, <8 x i32> inreg %rsrc, <4 x i32> inreg %sampler, <2 x float> %pos) #0 {
main_body:
  %inst23 = extractelement <2 x float> %pos, i32 0
  %inst24 = extractelement <2 x float> %pos, i32 1
  %inst25 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 0, i32 0, i32 %m0)
  %inst26 = tail call float @llvm.amdgcn.interp.p2(float %inst25, float %inst24, i32 0, i32 0, i32 %m0)
  %inst28 = tail call float @llvm.amdgcn.interp.p1(float %inst23, i32 1, i32 0, i32 %m0)
  %inst29 = tail call float @llvm.amdgcn.interp.p2(float %inst28, float %inst24, i32 1, i32 0, i32 %m0)
  %tex = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float %inst26, float %inst29, <8 x i32> %rsrc, <4 x i32> %sampler, i1 0, i32 0, i32 0)
  ret <4 x float> %tex
}

; GCN-LABEL: {{^}}test_wqm2:
; GFX1032: s_wqm_b32 exec_lo, exec_lo
; GFX1032: s_and_b32 exec_lo, exec_lo, s{{[0-9]+}}
; GFX1064: s_wqm_b64 exec, exec{{$}}
; GFX1064: s_and_b64 exec, exec, s[{{[0-9:]+}}]
define amdgpu_ps float @test_wqm2(i32 inreg %idx0, i32 inreg %idx1) #0 {
main_body:
  %src0 = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx0, i32 0, i32 0, i32 0)
  %src1 = call float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32> undef, i32 %idx1, i32 0, i32 0, i32 0)
  %out = fadd float %src0, %src1
  %out.0 = bitcast float %out to i32
  %out.1 = call i32 @llvm.amdgcn.wqm.i32(i32 %out.0)
  %out.2 = bitcast i32 %out.1 to float
  ret float %out.2
}

; GCN-LABEL: {{^}}test_intr_fcmp_i64:
; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}}
; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064:     v_cmp_eq_f32_e64 s[[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]]
; GCN:         store_dwordx2 v{{[0-9]+}}, v[[[V_LO]]:[[V_HI]]], s
define amdgpu_kernel void @test_intr_fcmp_i64(i64 addrspace(1)* %out, float %src, float %a) {
  %temp = call float @llvm.fabs.f32(float %a)
  %result = call i64 @llvm.amdgcn.fcmp.i64.f32(float %src, float %temp, i32 1)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_intr_icmp_i64:
; GFX1032-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], 0{{$}}
; GFX1032-DAG: v_cmp_eq_u32_e64 [[C_LO:vcc_lo|s[0-9]+]], 0x64, {{s[0-9]+}}
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], [[C_LO]]
; GFX1064:     v_cmp_eq_u32_e64 s[[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], 0x64, {{s[0-9]+}}
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064-DAG: v_mov_b32_e32 v[[V_HI:[0-9]+]], s[[C_HI]]
; GCN:         store_dwordx2 v{{[0-9]+}}, v[[[V_LO]]:[[V_HI]]], s
define amdgpu_kernel void @test_intr_icmp_i64(i64 addrspace(1)* %out, i32 %src) {
  %result = call i64 @llvm.amdgcn.icmp.i64.i32(i32 %src, i32 100, i32 32)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_intr_fcmp_i32:
; GFX1032-DAG: v_cmp_eq_f32_e64 s[[C_LO:[0-9]+]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GFX1064:     v_cmp_eq_f32_e64 s[[[C_LO:[0-9]+]]:[[C_HI:[0-9]+]]], {{s[0-9]+}}, |{{[vs][0-9]+}}|
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]
; GCN:         store_dword v{{[0-9]+}}, v[[V_LO]], s
define amdgpu_kernel void @test_intr_fcmp_i32(i32 addrspace(1)* %out, float %src, float %a) {
  %temp = call float @llvm.fabs.f32(float %a)
  %result = call i32 @llvm.amdgcn.fcmp.i32.f32(float %src, float %temp, i32 1)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_intr_icmp_i32:
; GFX1032-DAG: v_cmp_eq_u32_e64 s[[C_LO:[0-9]+]], 0x64, {{s[0-9]+}}
; GFX1032-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}}
; GFX1064:     v_cmp_eq_u32_e64 s[[[C_LO:[0-9]+]]:{{[0-9]+}}], 0x64, {{s[0-9]+}}
; GFX1064-DAG: v_mov_b32_e32 v[[V_LO:[0-9]+]], s[[C_LO]]{{$}}
; GCN:         store_dword v{{[0-9]+}}, v[[V_LO]], s
define amdgpu_kernel void @test_intr_icmp_i32(i32 addrspace(1)* %out, i32 %src) {
  %result = call i32 @llvm.amdgcn.icmp.i32.i32(i32 %src, i32 100, i32 32)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_wqm_vote:
; GFX1032: v_cmp_neq_f32_e32 vcc_lo, 0
; GFX1032: s_mov_b32 [[LIVE:s[0-9]+]], exec_lo
; GFX1032: s_wqm_b32 [[WQM:s[0-9]+]], vcc_lo
; GFX1032: s_xor_b32 [[KILL:s[0-9]+]], [[WQM]], exec_lo
; GFX1032: s_andn2_b32 [[MASK:s[0-9]+]], [[LIVE]], [[KILL]]
; GFX1032: s_and_b32 exec_lo, exec_lo, [[MASK]]
; GFX1064: v_cmp_neq_f32_e32 vcc, 0
; GFX1064: s_mov_b64 [[LIVE:s\[[0-9:]+\]]], exec
; GFX1064: s_wqm_b64 [[WQM:s\[[0-9:]+\]]], vcc
; GFX1064: s_xor_b64 [[KILL:s\[[0-9:]+\]]], [[WQM]], exec
; GFX1064: s_andn2_b64 [[MASK:s\[[0-9:]+\]]], [[LIVE]], [[KILL]]
; GFX1064: s_and_b64 exec, exec, [[MASK]]
define amdgpu_ps void @test_wqm_vote(float %a) {
  %c1 = fcmp une float %a, 0.0
  %c2 = call i1 @llvm.amdgcn.wqm.vote(i1 %c1)
  call void @llvm.amdgcn.kill(i1 %c2)
  call void @llvm.amdgcn.exp.f32(i32 0, i32 0, float 0.0, float 0.0, float 0.0, float 0.0, i1 false, i1 false)
  ret void
}

; GCN-LABEL: {{^}}test_branch_true:
; GFX1032: s_mov_b32 vcc_lo, exec_lo
; GFX1064: s_mov_b64 vcc, exec
define amdgpu_kernel void @test_branch_true() #2 {
entry:
  br i1 true, label %for.end, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  br label %for.body

for.body:                                         ; preds = %for.body, %for.body.lr.ph
  br i1 undef, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; GCN-LABEL: {{^}}test_ps_live:
; GFX1032: s_mov_b32 [[C:s[0-9]+]], exec_lo
; GFX1064: s_mov_b64 [[C:s\[[0-9:]+\]]], exec{{$}}
; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[C]]
define amdgpu_ps float @test_ps_live() #0 {
  %live = call i1 @llvm.amdgcn.ps.live()
  %live.32 = zext i1 %live to i32
  %r = bitcast i32 %live.32 to float
  ret float %r
}

; GCN-LABEL: {{^}}test_vccnz_ifcvt_triangle64:
; GFX1032: v_cmp_neq_f64_e64 [[C:s[0-9]+]], s[{{[0-9:]+}}], 1.0
; GFX1032: s_and_b32 vcc_lo, exec_lo, [[C]]
; GFX1064: v_cmp_neq_f64_e64 [[C:s\[[0-9:]+\]]], s[{{[0-9:]+}}], 1.0
; GFX1064: s_and_b64 vcc, exec, [[C]]
define amdgpu_kernel void @test_vccnz_ifcvt_triangle64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
entry:
  %v = load double, double addrspace(1)* %in
  %cc = fcmp oeq double %v, 1.000000e+00
  br i1 %cc, label %if, label %endif

if:
  %u = fadd double %v, %v
  br label %endif

endif:
  %r = phi double [ %v, %entry ], [ %u, %if ]
  store double %r, double addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}test_vgprblocks_w32_attr:
; Test that the wave size can be overridden in function attributes and that the block size is correct as a result
; GFX10DEFWAVE: ; VGPRBlocks: 1
define amdgpu_gs float @test_vgprblocks_w32_attr(float %a, float %b, float %c, float %d, float %e,
                                        float %f, float %g, float %h, float %i, float %j, float %k, float %l) #3 {
main_body:
  %s = fadd float %a, %b
  %s.1 = fadd float %s, %c
  %s.2 = fadd float %s.1, %d
  %s.3 = fadd float %s.2, %e
  %s.4 = fadd float %s.3, %f
  %s.5 = fadd float %s.4, %g
  %s.6 = fadd float %s.5, %h
  %s.7 = fadd float %s.6, %i
  %s.8 = fadd float %s.7, %j
  %s.9 = fadd float %s.8, %k
  %s.10 = fadd float %s.9, %l
  ret float %s.10
}

; GCN-LABEL: {{^}}test_vgprblocks_w64_attr:
; Test that the wave size can be overridden in function attributes and that the block size is correct as a result
; GFX10DEFWAVE: ; VGPRBlocks: 2
define amdgpu_gs float @test_vgprblocks_w64_attr(float %a, float %b, float %c, float %d, float %e,
                                        float %f, float %g, float %h, float %i, float %j, float %k, float %l) #4 {
main_body:
  %s = fadd float %a, %b
  %s.1 = fadd float %s, %c
  %s.2 = fadd float %s.1, %d
  %s.3 = fadd float %s.2, %e
  %s.4 = fadd float %s.3, %f
  %s.5 = fadd float %s.4, %g
  %s.6 = fadd float %s.5, %h
  %s.7 = fadd float %s.6, %i
  %s.8 = fadd float %s.7, %j
  %s.9 = fadd float %s.8, %k
  %s.10 = fadd float %s.9, %l
  ret float %s.10
}

; GCN-LABEL: {{^}}icmp64:
; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v
define amdgpu_kernel void @icmp64(i32 %n, i32 %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %mul4 = mul nsw i32 %s, %n
  %cmp = icmp slt i32 0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem = urem i32 %id, %s
  %icmp = tail call i64 @llvm.amdgcn.icmp.i64.i32(i32 %rem, i32 0, i32 32)
  %shr = lshr i64 %icmp, 1
  %notmask = shl nsw i64 -1, 0
  %and = and i64 %notmask, %shr
  %or = or i64 %and, -9223372036854775808
  %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true)
  %cast = trunc i64 %cttz to i32
  %cmp3 = icmp ugt i32 10, %cast
  %cmp6 = icmp ne i32 %rem, 0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}

; GCN-LABEL: {{^}}fcmp64:
; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v
define amdgpu_kernel void @fcmp64(float %n, float %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %id.f = uitofp i32 %id to float
  %mul4 = fmul float %s, %n
  %cmp = fcmp ult float 0.0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem.f = frem float %id.f, %s
  %fcmp = tail call i64 @llvm.amdgcn.fcmp.i64.f32(float %rem.f, float 0.0, i32 1)
  %shr = lshr i64 %fcmp, 1
  %notmask = shl nsw i64 -1, 0
  %and = and i64 %notmask, %shr
  %or = or i64 %and, -9223372036854775808
  %cttz = tail call i64 @llvm.cttz.i64(i64 %or, i1 true)
  %cast = trunc i64 %cttz to i32
  %cmp3 = icmp ugt i32 10, %cast
  %cmp6 = fcmp one float %rem.f, 0.0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}

; GCN-LABEL: {{^}}icmp32:
; GFX1032: v_cmp_eq_u32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_u32_e32 vcc, 0, v
define amdgpu_kernel void @icmp32(i32 %n, i32 %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %mul4 = mul nsw i32 %s, %n
  %cmp = icmp slt i32 0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem = urem i32 %id, %s
  %icmp = tail call i32 @llvm.amdgcn.icmp.i32.i32(i32 %rem, i32 0, i32 32)
  %shr = lshr i32 %icmp, 1
  %notmask = shl nsw i32 -1, 0
  %and = and i32 %notmask, %shr
  %or = or i32 %and, 2147483648
  %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true)
  %cmp3 = icmp ugt i32 10, %cttz
  %cmp6 = icmp ne i32 %rem, 0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}

; GCN-LABEL: {{^}}fcmp32:
; GFX1032: v_cmp_eq_f32_e32 vcc_lo, 0, v
; GFX1064: v_cmp_eq_f32_e32 vcc, 0, v
define amdgpu_kernel void @fcmp32(float %n, float %s) {
entry:
  %id = tail call i32 @llvm.amdgcn.workitem.id.x()
  %id.f = uitofp i32 %id to float
  %mul4 = fmul float %s, %n
  %cmp = fcmp ult float 0.0, %mul4
  br label %if.end

if.end:                                           ; preds = %entry
  %rem.f = frem float %id.f, %s
  %fcmp = tail call i32 @llvm.amdgcn.fcmp.i32.f32(float %rem.f, float 0.0, i32 1)
  %shr = lshr i32 %fcmp, 1
  %notmask = shl nsw i32 -1, 0
  %and = and i32 %notmask, %shr
  %or = or i32 %and, 2147483648
  %cttz = tail call i32 @llvm.cttz.i32(i32 %or, i1 true)
  %cmp3 = icmp ugt i32 10, %cttz
  %cmp6 = fcmp one float %rem.f, 0.0
  %brmerge = or i1 %cmp6, %cmp3
  br i1 %brmerge, label %if.end2, label %if.then

if.then:                                          ; preds = %if.end
  unreachable

if.end2:                                          ; preds = %if.end
  ret void
}

declare void @external_void_func_void() #1

; Test save/restore of VGPR needed for SGPR spilling.

; GCN-LABEL: {{^}}callee_no_stack_with_call:
; GCN: s_waitcnt
; GCN-NEXT: s_waitcnt_vscnt

; GFX1064-NEXT: s_or_saveexec_b64 [[COPY_EXEC0:s\[[0-9]+:[0-9]+\]]], -1{{$}}
; GFX1032-NEXT: s_or_saveexec_b32 [[COPY_EXEC0:s[0-9]+]], -1{{$}}
; GCN-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; GCN-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC0]]
; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC0]]

; GCN-NEXT: v_writelane_b32 v40, s33, 2
; GCN: s_mov_b32 s33, s32
; GFX1064: s_addk_i32 s32, 0x400
; GFX1032: s_addk_i32 s32, 0x200


; GCN-DAG: v_writelane_b32 v40, s30, 0
; GCN-DAG: v_writelane_b32 v40, s31, 1
; GCN: s_swappc_b64
; GCN-DAG: v_readlane_b32 s30, v40, 0
; GCN-DAG: v_readlane_b32 s31, v40, 1


; GFX1064: s_addk_i32 s32, 0xfc00
; GFX1032: s_addk_i32 s32, 0xfe00
; GCN: v_readlane_b32 s33, v40, 2
; GFX1064: s_or_saveexec_b64 [[COPY_EXEC1:s\[[0-9]+:[0-9]+\]]], -1{{$}}
; GFX1032: s_or_saveexec_b32 [[COPY_EXEC1:s[0-9]]], -1{{$}}
; GCN-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
; GCN-NEXT: s_waitcnt_depctr 0xffe3
; GFX1064-NEXT: s_mov_b64 exec, [[COPY_EXEC1]]
; GFX1032-NEXT: s_mov_b32 exec_lo, [[COPY_EXEC1]]
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: s_setpc_b64
define void @callee_no_stack_with_call() #1 {
  call void @external_void_func_void()
  ret void
}


declare i32 @llvm.amdgcn.workitem.id.x()
declare float @llvm.fabs.f32(float)
declare { float, i1 } @llvm.amdgcn.div.scale.f32(float, float, i1)
declare { double, i1 } @llvm.amdgcn.div.scale.f64(double, double, i1)
declare float @llvm.amdgcn.div.fmas.f32(float, float, float, i1)
declare double @llvm.amdgcn.div.fmas.f64(double, double, double, i1)
declare i1 @llvm.amdgcn.class.f32(float, i32)
declare i32 @llvm.amdgcn.set.inactive.i32(i32, i32)
declare i64 @llvm.amdgcn.set.inactive.i64(i64, i64)
declare <4 x float> @llvm.amdgcn.image.sample.1d.v4f32.f32(i32, float, <8 x i32>, <4 x i32>, i1, i32, i32)
declare <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32, float, float, <8 x i32>, <4 x i32>, i1, i32, i32)
declare float @llvm.amdgcn.strict.wwm.f32(float)
declare float @llvm.amdgcn.wwm.f32(float)
declare i32 @llvm.amdgcn.wqm.i32(i32)
declare float @llvm.amdgcn.interp.p1(float, i32, i32, i32)
declare float @llvm.amdgcn.interp.p2(float, float, i32, i32, i32)
declare float @llvm.amdgcn.struct.buffer.load.f32(<4 x i32>, i32, i32, i32, i32 immarg)
declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32)
declare i32 @llvm.amdgcn.mbcnt.hi(i32, i32)
declare i64 @llvm.amdgcn.fcmp.i64.f32(float, float, i32)
declare i64 @llvm.amdgcn.icmp.i64.i32(i32, i32, i32)
declare i32 @llvm.amdgcn.fcmp.i32.f32(float, float, i32)
declare i32 @llvm.amdgcn.icmp.i32.i32(i32, i32, i32)
declare void @llvm.amdgcn.kill(i1)
declare i1 @llvm.amdgcn.wqm.vote(i1)
declare i1 @llvm.amdgcn.ps.live()
declare i64 @llvm.cttz.i64(i64, i1)
declare i32 @llvm.cttz.i32(i32, i1)
declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #5

attributes #0 = { nounwind readnone speculatable }
attributes #1 = { nounwind }
attributes #2 = { nounwind readnone optnone noinline }
attributes #3 = { "target-features"="+wavefrontsize32" }
attributes #4 = { "target-features"="+wavefrontsize64" }
attributes #5 = { inaccessiblememonly nounwind }