; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck --check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX8 %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX9 %s

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
declare i32 @llvm.amdgcn.workitem.id.y() nounwind readnone

; xor/and of a zext'd i1 through i8. Per the CHECK lines below: the verde
; run folds the invert into the compare polarity (s_cmp_lg_u32, no v_not),
; while the tonga/gfx900 runs keep s_cmp_eq_u32 followed by v_not + v_and.
define amdgpu_kernel void @anyext_i1_i32(i32 addrspace(1)* %out, i32 %cond) #0 {
; GCN-LABEL: anyext_i1_i32:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GCN-NEXT:    s_load_dword s0, s[0:1], 0xb
; GCN-NEXT:    s_mov_b32 s7, 0xf000
; GCN-NEXT:    s_mov_b32 s6, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_cmp_lg_u32 s0, 0
; GCN-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GCN-NEXT:    s_endpgm
;
; GFX8-LABEL: anyext_i1_i32:
; GFX8:       ; %bb.0: ; %entry
; GFX8-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX8-NEXT:    s_load_dword s0, s[0:1], 0x2c
; GFX8-NEXT:    s_mov_b32 s7, 0xf000
; GFX8-NEXT:    s_mov_b32 s6, -1
; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
; GFX8-NEXT:    s_cmp_eq_u32 s0, 0
; GFX8-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX8-NEXT:    v_not_b32_e32 v0, v0
; GFX8-NEXT:    v_and_b32_e32 v0, 1, v0
; GFX8-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GFX8-NEXT:    s_endpgm
;
; GFX9-LABEL: anyext_i1_i32:
; GFX9:       ; %bb.0: ; %entry
; GFX9-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; GFX9-NEXT:    s_load_dword s2, s[0:1], 0x2c
; GFX9-NEXT:    s_mov_b32 s7, 0xf000
; GFX9-NEXT:    s_mov_b32 s6, -1
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    s_cmp_eq_u32 s2, 0
; GFX9-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[0:1]
; GFX9-NEXT:    v_not_b32_e32 v0, v0
; GFX9-NEXT:    v_and_b32_e32 v0, 1, v0
; GFX9-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GFX9-NEXT:    s_endpgm
entry:
  %tmp = icmp eq i32 %cond, 0
  %tmp1 = zext i1 %tmp to i8
  %tmp2 = xor i8 %tmp1, -1
  %tmp3 = and i8 %tmp2, 1
  %tmp4 = zext i8 %tmp3 to i32
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}
; i16 add of two loaded values, truncated to i8, then xor/and. Per the CHECK
; lines: verde uses buffer loads with 64-bit addr64 addressing; tonga uses
; flat loads with manual 64-bit address math; gfx900 uses global loads with a
; 32-bit voffset. gfx8/gfx9 additionally mask with 0xffff after the i16 add.
define amdgpu_kernel void @s_anyext_i16_i32(i32 addrspace(1)* %out, i16 addrspace(1)* %a, i16 addrspace(1)* %b) #0 {
; GCN-LABEL: s_anyext_i16_i32:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GCN-NEXT:    s_mov_b32 s11, 0xf000
; GCN-NEXT:    s_mov_b32 s14, 0
; GCN-NEXT:    s_mov_b32 s15, s11
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b64 s[12:13], s[6:7]
; GCN-NEXT:    v_lshlrev_b32_e32 v2, 1, v0
; GCN-NEXT:    v_mov_b32_e32 v3, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 1, v1
; GCN-NEXT:    s_mov_b64 s[2:3], s[14:15]
; GCN-NEXT:    v_mov_b32_e32 v1, v3
; GCN-NEXT:    buffer_load_ushort v2, v[2:3], s[12:15], 0 addr64
; GCN-NEXT:    buffer_load_ushort v0, v[0:1], s[0:3], 0 addr64
; GCN-NEXT:    s_mov_b32 s10, -1
; GCN-NEXT:    s_mov_b32 s8, s4
; GCN-NEXT:    s_mov_b32 s9, s5
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
; GCN-NEXT:    v_not_b32_e32 v0, v0
; GCN-NEXT:    v_and_b32_e32 v0, 1, v0
; GCN-NEXT:    buffer_store_dword v0, off, s[8:11], 0
; GCN-NEXT:    s_endpgm
;
; GFX8-LABEL: s_anyext_i16_i32:
; GFX8:       ; %bb.0: ; %entry
; GFX8-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX8-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
; GFX8-NEXT:    s_mov_b32 s3, 0xf000
; GFX8-NEXT:    s_mov_b32 s2, -1
; GFX8-NEXT:    s_waitcnt lgkmcnt(0)
; GFX8-NEXT:    v_mov_b32_e32 v3, s7
; GFX8-NEXT:    v_add_u32_e32 v2, vcc, s6, v0
; GFX8-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
; GFX8-NEXT:    v_lshlrev_b32_e32 v0, 1, v1
; GFX8-NEXT:    v_mov_b32_e32 v1, s9
; GFX8-NEXT:    v_add_u32_e32 v0, vcc, s8, v0
; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT:    flat_load_ushort v2, v[2:3]
; GFX8-NEXT:    flat_load_ushort v0, v[0:1]
; GFX8-NEXT:    s_mov_b32 s0, s4
; GFX8-NEXT:    s_mov_b32 s1, s5
; GFX8-NEXT:    s_waitcnt vmcnt(0)
; GFX8-NEXT:    v_add_u16_e32 v0, v2, v0
; GFX8-NEXT:    v_xor_b32_e32 v0, -1, v0
; GFX8-NEXT:    v_and_b32_e32 v0, 1, v0
; GFX8-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX8-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX8-NEXT:    s_endpgm
;
; GFX9-LABEL: s_anyext_i16_i32:
; GFX9:       ; %bb.0: ; %entry
; GFX9-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX9-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0x34
; GFX9-NEXT:    v_lshlrev_b32_e32 v0, 1, v0
; GFX9-NEXT:    v_lshlrev_b32_e32 v1, 1, v1
; GFX9-NEXT:    s_mov_b32 s3, 0xf000
; GFX9-NEXT:    s_mov_b32 s2, -1
; GFX9-NEXT:    s_waitcnt lgkmcnt(0)
; GFX9-NEXT:    global_load_ushort v2, v0, s[6:7]
; GFX9-NEXT:    global_load_ushort v3, v1, s[8:9]
; GFX9-NEXT:    s_mov_b32 s0, s4
; GFX9-NEXT:    s_mov_b32 s1, s5
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    v_add_u16_e32 v0, v2, v3
; GFX9-NEXT:    v_xor_b32_e32 v0, -1, v0
; GFX9-NEXT:    v_and_b32_e32 v0, 1, v0
; GFX9-NEXT:    v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GFX9-NEXT:    s_endpgm
entry:
  %tid.x = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.y = call i32 @llvm.amdgcn.workitem.id.y()
  %a.ptr = getelementptr i16, i16 addrspace(1)* %a, i32 %tid.x
  %b.ptr = getelementptr i16, i16 addrspace(1)* %b, i32 %tid.y
  %a.l = load i16, i16 addrspace(1)* %a.ptr
  %b.l = load i16, i16 addrspace(1)* %b.ptr
  %tmp = add i16 %a.l, %b.l
  %tmp1 = trunc i16 %tmp to i8
  %tmp2 = xor i8 %tmp1, -1
  %tmp3 = and i8 %tmp2, 1
  %tmp4 = zext i8 %tmp3 to i32
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}
; Vector case: <2 x i16> masked with the sign bit, zext'd, shifted into the
; high half, bitcast to float, and only lane 1 of the compare result is kept.
; Per the CHECK lines: verde does and + shl on a 32-bit reg; tonga folds the
; shift into an SDWA and with dst_sel:WORD_1; gfx900 loads straight into the
; high half (global_load_short_d16_hi) and clears the low half with v_bfi_b32.
define amdgpu_kernel void @anyext_v2i16_to_v2i32() #0 {
; GCN-LABEL: anyext_v2i16_to_v2i32:
; GCN:       ; %bb.0: ; %bb
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_and_b32_e32 v0, 0x8000, v0
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 16, v0
; GCN-NEXT:    v_cmp_eq_f32_e32 vcc, 0, v0
; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; GCN-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
;
; GFX8-LABEL: anyext_v2i16_to_v2i32:
; GFX8:       ; %bb.0: ; %bb
; GFX8-NEXT:    s_mov_b32 s3, 0xf000
; GFX8-NEXT:    s_mov_b32 s2, -1
; GFX8-NEXT:    buffer_load_ushort v0, off, s[0:3], 0
; GFX8-NEXT:    v_mov_b32_e32 v1, 0x8000
; GFX8-NEXT:    s_waitcnt vmcnt(0)
; GFX8-NEXT:    v_and_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX8-NEXT:    v_cmp_eq_f32_e32 vcc, 0, v0
; GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX8-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; GFX8-NEXT:    s_endpgm
;
; GFX9-LABEL: anyext_v2i16_to_v2i32:
; GFX9:       ; %bb.0: ; %bb
; GFX9-NEXT:    global_load_short_d16_hi v0, v[0:1], off
; GFX9-NEXT:    v_mov_b32_e32 v1, 0xffff
; GFX9-NEXT:    s_mov_b32 s3, 0xf000
; GFX9-NEXT:    s_mov_b32 s2, -1
; GFX9-NEXT:    s_waitcnt vmcnt(0)
; GFX9-NEXT:    v_and_b32_e32 v0, 0x80008000, v0
; GFX9-NEXT:    v_bfi_b32 v0, v1, 0, v0
; GFX9-NEXT:    v_cmp_eq_f32_e32 vcc, 0, v0
; GFX9-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
; GFX9-NEXT:    buffer_store_byte v0, off, s[0:3], 0
; GFX9-NEXT:    s_endpgm
bb:
  %tmp = load i16, i16 addrspace(1)* undef, align 2
  %tmp2 = insertelement <2 x i16> undef, i16 %tmp, i32 1
  %tmp4 = and <2 x i16> %tmp2, <i16 -32768, i16 -32768>
  %tmp5 = zext <2 x i16> %tmp4 to <2 x i32>
  %tmp6 = shl nuw <2 x i32> %tmp5, <i32 16, i32 16>
  %tmp7 = or <2 x i32> zeroinitializer, %tmp6
  %tmp8 = bitcast <2 x i32> %tmp7 to <2 x float>
  %tmp10 = fcmp oeq <2 x float> %tmp8, zeroinitializer
  %tmp11 = zext <2 x i1> %tmp10 to <2 x i8>
  %tmp12 = extractelement <2 x i8> %tmp11, i32 1
  store i8 %tmp12, i8 addrspace(1)* undef, align 1
  ret void
}

attributes #0 = { nounwind }
