; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

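; A 128-bit shift is expanded into 64-bit pieces. As a rough sketch, the shl
; lowering the checks below reflect computes (lshr/ashr are analogous):
;   lo' = (amt < 64) ? (lo << amt) : 0
;   hi' = (amt == 0) ? hi
;       : (amt < 64) ? (hi << amt) | (lo >> (64 - amt))
;       : lo << (amt - 64)

; Variable i128 left shift with both operands in VGPRs.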
define i128 @v_shl_i128_vv(i128 %lhs, i128 %rhs) {
; GCN-LABEL: v_shl_i128_vv:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_sub_i32_e32 v7, vcc, 64, v4
; GCN-NEXT:    v_lshl_b64 v[5:6], v[2:3], v4
; GCN-NEXT:    v_lshr_b64 v[7:8], v[0:1], v7
; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v4
; GCN-NEXT:    v_or_b32_e32 v7, v5, v7
; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, 64, v4
; GCN-NEXT:    v_or_b32_e32 v8, v6, v8
; GCN-NEXT:    v_lshl_b64 v[5:6], v[0:1], v5
; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v4
; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
; GCN-NEXT:    v_lshl_b64 v[0:1], v[0:1], v4
; GCN-NEXT:    v_cndmask_b32_e64 v2, v5, v2, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e32 v5, v6, v8, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v3, v5, v3, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = shl i128 %lhs, %rhs
  ret i128 %shl
}

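; Variable i128 logical right shift with both operands in VGPRs.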
define i128 @v_lshr_i128_vv(i128 %lhs, i128 %rhs) {
; GCN-LABEL: v_lshr_i128_vv:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_sub_i32_e32 v7, vcc, 64, v4
; GCN-NEXT:    v_lshr_b64 v[5:6], v[0:1], v4
; GCN-NEXT:    v_lshl_b64 v[7:8], v[2:3], v7
; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v4
; GCN-NEXT:    v_or_b32_e32 v7, v5, v7
; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, 64, v4
; GCN-NEXT:    v_or_b32_e32 v8, v6, v8
; GCN-NEXT:    v_lshr_b64 v[5:6], v[2:3], v5
; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v4
; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
; GCN-NEXT:    v_lshr_b64 v[2:3], v[2:3], v4
; GCN-NEXT:    v_cndmask_b32_e64 v0, v5, v0, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e32 v5, v6, v8, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v1, v5, v1, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
; GCN-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = lshr i128 %lhs, %rhs
  ret i128 %shl
}

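; Variable i128 arithmetic right shift with both operands in VGPRs.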
define i128 @v_ashr_i128_vv(i128 %lhs, i128 %rhs) {
; GCN-LABEL: v_ashr_i128_vv:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_sub_i32_e32 v7, vcc, 64, v4
; GCN-NEXT:    v_lshr_b64 v[5:6], v[0:1], v4
; GCN-NEXT:    v_lshl_b64 v[7:8], v[2:3], v7
; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], 0, v4
; GCN-NEXT:    v_or_b32_e32 v7, v5, v7
; GCN-NEXT:    v_subrev_i32_e32 v5, vcc, 64, v4
; GCN-NEXT:    v_or_b32_e32 v8, v6, v8
; GCN-NEXT:    v_ashr_i64 v[5:6], v[2:3], v5
; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v4
; GCN-NEXT:    v_cndmask_b32_e32 v5, v5, v7, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v0, v5, v0, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e32 v5, v6, v8, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v1, v5, v1, s[4:5]
; GCN-NEXT:    v_ashr_i64 v[4:5], v[2:3], v4
; GCN-NEXT:    v_ashrrev_i32_e32 v3, 31, v3
; GCN-NEXT:    v_cndmask_b32_e32 v2, v3, v4, vcc
; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v5, vcc
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = ashr i128 %lhs, %rhs
  ret i128 %shl
}

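; i128 left shift by the constant amount 17.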
define i128 @v_shl_i128_vk(i128 %lhs) {
; GCN-LABEL: v_shl_i128_vk:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_alignbit_b32 v4, v2, v1, 15
; GCN-NEXT:    v_alignbit_b32 v1, v1, v0, 15
; GCN-NEXT:    v_alignbit_b32 v3, v3, v2, 15
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 17, v0
; GCN-NEXT:    v_mov_b32_e32 v2, v4
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = shl i128 %lhs, 17
  ret i128 %shl
}

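; i128 logical right shift by the constant amount 65, which crosses the 64-bit half boundary.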
define i128 @v_lshr_i128_vk(i128 %lhs) {
; GCN-LABEL: v_lshr_i128_vk:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_alignbit_b32 v0, v3, v2, 1
; GCN-NEXT:    v_lshrrev_b32_e32 v1, 1, v3
; GCN-NEXT:    v_mov_b32_e32 v2, 0
; GCN-NEXT:    v_mov_b32_e32 v3, 0
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = lshr i128 %lhs, 65
  ret i128 %shl
}

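; i128 arithmetic right shift by the constant amount 33.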
define i128 @v_ashr_i128_vk(i128 %lhs) {
; GCN-LABEL: v_ashr_i128_vk:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_ashr_i64 v[4:5], v[2:3], 33
; GCN-NEXT:    v_alignbit_b32 v0, v2, v1, 1
; GCN-NEXT:    v_alignbit_b32 v1, v3, v2, 1
; GCN-NEXT:    v_mov_b32_e32 v2, v4
; GCN-NEXT:    v_mov_b32_e32 v3, v5
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = ashr i128 %lhs, 33
  ret i128 %shl
}

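; Left shift of the constant 17 by a variable amount.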
define i128 @v_shl_i128_kv(i128 %rhs) {
; GCN-LABEL: v_shl_i128_kv:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_sub_i32_e32 v1, vcc, 64, v0
; GCN-NEXT:    v_lshr_b64 v[2:3], 17, v1
; GCN-NEXT:    v_subrev_i32_e32 v1, vcc, 64, v0
; GCN-NEXT:    v_lshl_b64 v[4:5], 17, v1
; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v0
; GCN-NEXT:    v_cndmask_b32_e32 v1, v4, v2, vcc
; GCN-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v0
; GCN-NEXT:    v_cndmask_b32_e64 v2, 0, v1, s[4:5]
; GCN-NEXT:    v_lshl_b64 v[0:1], 17, v0
; GCN-NEXT:    v_cndmask_b32_e32 v3, v5, v3, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, v3, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = shl i128 17, %rhs
  ret i128 %shl
}

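; Logical right shift of the constant 65 by a variable amount.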
define i128 @v_lshr_i128_kv(i128 %rhs) {
; GCN-LABEL: v_lshr_i128_kv:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    s_mov_b64 s[4:5], 0x41
; GCN-NEXT:    v_lshr_b64 v[1:2], s[4:5], v0
; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v0
; GCN-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v0
; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
; GCN-NEXT:    v_mov_b32_e32 v3, 0x41
; GCN-NEXT:    s_and_b64 vcc, s[4:5], vcc
; GCN-NEXT:    v_cndmask_b32_e64 v0, v3, v1, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v2, vcc
; GCN-NEXT:    v_mov_b32_e32 v2, 0
; GCN-NEXT:    v_mov_b32_e32 v3, 0
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = lshr i128 65, %rhs
  ret i128 %shl
}

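; Arithmetic right shift of the constant 33 by a variable amount.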
define i128 @v_ashr_i128_kv(i128 %rhs) {
; GCN-LABEL: v_ashr_i128_kv:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_lshr_b64 v[1:2], 33, v0
; GCN-NEXT:    v_cmp_gt_u32_e32 vcc, 64, v0
; GCN-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v0
; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
; GCN-NEXT:    s_and_b64 vcc, s[4:5], vcc
; GCN-NEXT:    v_cndmask_b32_e64 v0, 33, v1, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v2, vcc
; GCN-NEXT:    v_mov_b32_e32 v2, 0
; GCN-NEXT:    v_mov_b32_e32 v3, 0
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = ashr i128 33, %rhs
  ret i128 %shl
}

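; Uniform i128 left shift; both operands are kernel arguments in SGPRs.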
define amdgpu_kernel void @s_shl_i128_ss(i128 %lhs, i128 %rhs) {
; GCN-LABEL: s_shl_i128_ss:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx8 s[4:11], s[4:5], 0x0
; GCN-NEXT:    v_mov_b32_e32 v4, 0
; GCN-NEXT:    v_mov_b32_e32 v5, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_sub_i32 s9, 64, s8
; GCN-NEXT:    s_sub_i32 s2, s8, 64
; GCN-NEXT:    s_lshl_b64 s[0:1], s[6:7], s8
; GCN-NEXT:    s_lshr_b64 s[10:11], s[4:5], s9
; GCN-NEXT:    s_lshl_b64 s[2:3], s[4:5], s2
; GCN-NEXT:    s_or_b64 s[10:11], s[0:1], s[10:11]
; GCN-NEXT:    s_cmp_lt_u32 s8, 64
; GCN-NEXT:    v_mov_b32_e32 v0, s3
; GCN-NEXT:    v_mov_b32_e32 v1, s11
; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s8, 0
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GCN-NEXT:    v_mov_b32_e32 v1, s7
; GCN-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT:    v_cndmask_b32_e64 v3, v0, v1, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v0, s2
; GCN-NEXT:    v_mov_b32_e32 v1, s10
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GCN-NEXT:    v_mov_b32_e32 v1, s6
; GCN-NEXT:    v_cndmask_b32_e64 v2, v0, v1, s[0:1]
; GCN-NEXT:    s_lshl_b64 s[0:1], s[4:5], s8
; GCN-NEXT:    v_mov_b32_e32 v0, s1
; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v0, vcc
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
; GCN-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
; GCN-NEXT:    s_endpgm
  %shift = shl i128 %lhs, %rhs
  store i128 %shift, i128 addrspace(1)* null
  ret void
}

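; Uniform i128 logical right shift with both operands in SGPRs.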
define amdgpu_kernel void @s_lshr_i128_ss(i128 %lhs, i128 %rhs) {
; GCN-LABEL: s_lshr_i128_ss:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx8 s[4:11], s[4:5], 0x0
; GCN-NEXT:    v_mov_b32_e32 v4, 0
; GCN-NEXT:    v_mov_b32_e32 v5, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_sub_i32 s9, 64, s8
; GCN-NEXT:    s_sub_i32 s2, s8, 64
; GCN-NEXT:    s_lshr_b64 s[0:1], s[4:5], s8
; GCN-NEXT:    s_lshl_b64 s[10:11], s[6:7], s9
; GCN-NEXT:    s_lshr_b64 s[2:3], s[6:7], s2
; GCN-NEXT:    s_or_b64 s[10:11], s[0:1], s[10:11]
; GCN-NEXT:    s_cmp_lt_u32 s8, 64
; GCN-NEXT:    v_mov_b32_e32 v0, s3
; GCN-NEXT:    v_mov_b32_e32 v1, s11
; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
; GCN-NEXT:    s_cmp_eq_u32 s8, 0
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GCN-NEXT:    v_mov_b32_e32 v1, s5
; GCN-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v0, s2
; GCN-NEXT:    v_mov_b32_e32 v2, s10
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GCN-NEXT:    v_mov_b32_e32 v2, s4
; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
; GCN-NEXT:    s_lshr_b64 s[0:1], s[6:7], s8
; GCN-NEXT:    v_mov_b32_e32 v2, s1
; GCN-NEXT:    v_cndmask_b32_e32 v3, 0, v2, vcc
; GCN-NEXT:    v_mov_b32_e32 v2, s0
; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
; GCN-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
; GCN-NEXT:    s_endpgm
  %shift = lshr i128 %lhs, %rhs
  store i128 %shift, i128 addrspace(1)* null
  ret void
}

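; Uniform i128 arithmetic right shift with both operands in SGPRs.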
define amdgpu_kernel void @s_ashr_i128_ss(i128 %lhs, i128 %rhs) {
; GCN-LABEL: s_ashr_i128_ss:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx8 s[4:11], s[4:5], 0x0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_ashr_i32 s2, s7, 31
; GCN-NEXT:    s_ashr_i64 s[0:1], s[6:7], s8
; GCN-NEXT:    s_cmp_lt_u32 s8, 64
; GCN-NEXT:    v_mov_b32_e32 v0, s2
; GCN-NEXT:    v_mov_b32_e32 v1, s1
; GCN-NEXT:    s_cselect_b64 vcc, -1, 0
; GCN-NEXT:    v_cndmask_b32_e32 v3, v0, v1, vcc
; GCN-NEXT:    v_mov_b32_e32 v1, s0
; GCN-NEXT:    s_sub_i32 s0, s8, 64
; GCN-NEXT:    s_ashr_i64 s[2:3], s[6:7], s0
; GCN-NEXT:    s_sub_i32 s0, 64, s8
; GCN-NEXT:    s_lshl_b64 s[0:1], s[6:7], s0
; GCN-NEXT:    s_lshr_b64 s[6:7], s[4:5], s8
; GCN-NEXT:    s_or_b64 s[6:7], s[6:7], s[0:1]
; GCN-NEXT:    v_cndmask_b32_e32 v2, v0, v1, vcc
; GCN-NEXT:    v_mov_b32_e32 v0, s3
; GCN-NEXT:    v_mov_b32_e32 v1, s7
; GCN-NEXT:    s_cmp_eq_u32 s8, 0
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GCN-NEXT:    v_mov_b32_e32 v1, s5
; GCN-NEXT:    s_cselect_b64 s[0:1], -1, 0
; GCN-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v0, s2
; GCN-NEXT:    v_mov_b32_e32 v4, s6
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v4, vcc
; GCN-NEXT:    v_mov_b32_e32 v6, s4
; GCN-NEXT:    v_mov_b32_e32 v4, 0
; GCN-NEXT:    v_mov_b32_e32 v5, 0
; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v6, s[0:1]
; GCN-NEXT:    flat_store_dwordx4 v[4:5], v[0:3]
; GCN-NEXT:    s_endpgm
  %shift = ashr i128 %lhs, %rhs
  store i128 %shift, i128 addrspace(1)* null
  ret void
}

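; <2 x i128> left shift with per-element variable amounts in VGPRs.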
define <2 x i128> @v_shl_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GCN-LABEL: v_shl_v2i128_vv:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 64, v8
; GCN-NEXT:    v_lshr_b64 v[16:17], v[0:1], v16
; GCN-NEXT:    v_lshl_b64 v[18:19], v[2:3], v8
; GCN-NEXT:    v_cmp_gt_u64_e32 vcc, 64, v[8:9]
; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
; GCN-NEXT:    v_or_b32_e32 v11, v9, v11
; GCN-NEXT:    v_subrev_i32_e64 v9, s[6:7], 64, v8
; GCN-NEXT:    v_or_b32_e32 v19, v19, v17
; GCN-NEXT:    v_or_b32_e32 v18, v18, v16
; GCN-NEXT:    v_or_b32_e32 v10, v8, v10
; GCN-NEXT:    v_lshl_b64 v[16:17], v[0:1], v9
; GCN-NEXT:    s_and_b64 vcc, s[4:5], vcc
; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
; GCN-NEXT:    v_cndmask_b32_e32 v9, v16, v18, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v2, v9, v2, s[4:5]
; GCN-NEXT:    v_sub_i32_e64 v9, s[6:7], 64, v12
; GCN-NEXT:    v_cndmask_b32_e32 v11, v17, v19, vcc
; GCN-NEXT:    v_lshr_b64 v[9:10], v[4:5], v9
; GCN-NEXT:    v_lshl_b64 v[16:17], v[6:7], v12
; GCN-NEXT:    v_cndmask_b32_e64 v3, v11, v3, s[4:5]
; GCN-NEXT:    v_or_b32_e32 v16, v16, v9
; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[12:13]
; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
; GCN-NEXT:    v_subrev_i32_e64 v9, s[8:9], 64, v12
; GCN-NEXT:    v_or_b32_e32 v11, v17, v10
; GCN-NEXT:    v_lshl_b64 v[9:10], v[4:5], v9
; GCN-NEXT:    v_or_b32_e32 v15, v13, v15
; GCN-NEXT:    v_or_b32_e32 v14, v12, v14
; GCN-NEXT:    s_and_b64 s[4:5], s[6:7], s[4:5]
; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
; GCN-NEXT:    v_cndmask_b32_e64 v9, v9, v16, s[4:5]
; GCN-NEXT:    v_lshl_b64 v[0:1], v[0:1], v8
; GCN-NEXT:    v_lshl_b64 v[4:5], v[4:5], v12
; GCN-NEXT:    v_cndmask_b32_e64 v6, v9, v6, s[6:7]
; GCN-NEXT:    v_cndmask_b32_e64 v9, v10, v11, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e64 v7, v9, v7, s[6:7]
; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v1, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, v4, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, v5, s[4:5]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = shl <2 x i128> %lhs, %rhs
  ret <2 x i128> %shl
}

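; <2 x i128> logical right shift with per-element variable amounts in VGPRs.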
define <2 x i128> @v_lshr_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GCN-LABEL: v_lshr_v2i128_vv:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 64, v8
; GCN-NEXT:    v_lshl_b64 v[16:17], v[2:3], v16
; GCN-NEXT:    v_lshr_b64 v[18:19], v[0:1], v8
; GCN-NEXT:    v_cmp_gt_u64_e32 vcc, 64, v[8:9]
; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
; GCN-NEXT:    v_or_b32_e32 v11, v9, v11
; GCN-NEXT:    v_subrev_i32_e64 v9, s[6:7], 64, v8
; GCN-NEXT:    v_or_b32_e32 v19, v19, v17
; GCN-NEXT:    v_or_b32_e32 v18, v18, v16
; GCN-NEXT:    v_or_b32_e32 v10, v8, v10
; GCN-NEXT:    v_lshr_b64 v[16:17], v[2:3], v9
; GCN-NEXT:    s_and_b64 vcc, s[4:5], vcc
; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
; GCN-NEXT:    v_cndmask_b32_e32 v9, v16, v18, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v0, v9, v0, s[4:5]
; GCN-NEXT:    v_sub_i32_e64 v9, s[6:7], 64, v12
; GCN-NEXT:    v_cndmask_b32_e32 v11, v17, v19, vcc
; GCN-NEXT:    v_lshl_b64 v[9:10], v[6:7], v9
; GCN-NEXT:    v_lshr_b64 v[16:17], v[4:5], v12
; GCN-NEXT:    v_cndmask_b32_e64 v1, v11, v1, s[4:5]
; GCN-NEXT:    v_or_b32_e32 v16, v16, v9
; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[12:13]
; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
; GCN-NEXT:    v_subrev_i32_e64 v9, s[8:9], 64, v12
; GCN-NEXT:    v_or_b32_e32 v11, v17, v10
; GCN-NEXT:    v_lshr_b64 v[9:10], v[6:7], v9
; GCN-NEXT:    v_or_b32_e32 v15, v13, v15
; GCN-NEXT:    v_or_b32_e32 v14, v12, v14
; GCN-NEXT:    s_and_b64 s[4:5], s[6:7], s[4:5]
; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
; GCN-NEXT:    v_cndmask_b32_e64 v9, v9, v16, s[4:5]
; GCN-NEXT:    v_lshr_b64 v[2:3], v[2:3], v8
; GCN-NEXT:    v_lshr_b64 v[6:7], v[6:7], v12
; GCN-NEXT:    v_cndmask_b32_e64 v4, v9, v4, s[6:7]
; GCN-NEXT:    v_cndmask_b32_e64 v9, v10, v11, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e64 v5, v9, v5, s[6:7]
; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
; GCN-NEXT:    v_cndmask_b32_e32 v3, 0, v3, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, v6, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, v7, s[4:5]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = lshr <2 x i128> %lhs, %rhs
  ret <2 x i128> %shl
}

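; <2 x i128> arithmetic right shift with per-element variable amounts in VGPRs.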
define <2 x i128> @v_ashr_v2i128_vv(<2 x i128> %lhs, <2 x i128> %rhs) {
; GCN-LABEL: v_ashr_v2i128_vv:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT:    v_sub_i32_e32 v16, vcc, 64, v8
; GCN-NEXT:    v_lshl_b64 v[16:17], v[2:3], v16
; GCN-NEXT:    v_lshr_b64 v[18:19], v[0:1], v8
; GCN-NEXT:    v_cmp_gt_u64_e32 vcc, 64, v[8:9]
; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
; GCN-NEXT:    v_or_b32_e32 v11, v9, v11
; GCN-NEXT:    v_subrev_i32_e64 v9, s[6:7], 64, v8
; GCN-NEXT:    v_or_b32_e32 v19, v19, v17
; GCN-NEXT:    v_or_b32_e32 v18, v18, v16
; GCN-NEXT:    v_or_b32_e32 v10, v8, v10
; GCN-NEXT:    v_ashr_i64 v[16:17], v[2:3], v9
; GCN-NEXT:    s_and_b64 vcc, s[4:5], vcc
; GCN-NEXT:    v_cmp_eq_u64_e64 s[4:5], 0, v[10:11]
; GCN-NEXT:    v_cndmask_b32_e32 v9, v16, v18, vcc
; GCN-NEXT:    v_cndmask_b32_e64 v0, v9, v0, s[4:5]
; GCN-NEXT:    v_sub_i32_e64 v9, s[6:7], 64, v12
; GCN-NEXT:    v_cndmask_b32_e32 v11, v17, v19, vcc
; GCN-NEXT:    v_lshl_b64 v[9:10], v[6:7], v9
; GCN-NEXT:    v_lshr_b64 v[16:17], v[4:5], v12
; GCN-NEXT:    v_cndmask_b32_e64 v1, v11, v1, s[4:5]
; GCN-NEXT:    v_or_b32_e32 v16, v16, v9
; GCN-NEXT:    v_cmp_gt_u64_e64 s[4:5], 64, v[12:13]
; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
; GCN-NEXT:    v_subrev_i32_e64 v9, s[8:9], 64, v12
; GCN-NEXT:    v_or_b32_e32 v11, v17, v10
; GCN-NEXT:    v_ashr_i64 v[9:10], v[6:7], v9
; GCN-NEXT:    v_or_b32_e32 v15, v13, v15
; GCN-NEXT:    v_or_b32_e32 v14, v12, v14
; GCN-NEXT:    s_and_b64 s[4:5], s[6:7], s[4:5]
; GCN-NEXT:    v_cmp_eq_u64_e64 s[6:7], 0, v[14:15]
; GCN-NEXT:    v_cndmask_b32_e64 v9, v9, v16, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e64 v4, v9, v4, s[6:7]
; GCN-NEXT:    v_cndmask_b32_e64 v9, v10, v11, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e64 v5, v9, v5, s[6:7]
; GCN-NEXT:    v_ashr_i64 v[8:9], v[2:3], v8
; GCN-NEXT:    v_ashrrev_i32_e32 v3, 31, v3
; GCN-NEXT:    v_cndmask_b32_e32 v2, v3, v8, vcc
; GCN-NEXT:    v_cndmask_b32_e32 v3, v3, v9, vcc
; GCN-NEXT:    v_ashr_i64 v[8:9], v[6:7], v12
; GCN-NEXT:    v_ashrrev_i32_e32 v7, 31, v7
; GCN-NEXT:    v_cndmask_b32_e64 v6, v7, v8, s[4:5]
; GCN-NEXT:    v_cndmask_b32_e64 v7, v7, v9, s[4:5]
; GCN-NEXT:    s_setpc_b64 s[30:31]
  %shl = ashr <2 x i128> %lhs, %rhs
  ret <2 x i128> %shl
}

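; Uniform <2 x i128> left shift with both operands in SGPRs.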
define amdgpu_kernel void @s_shl_v2i128_ss(<2 x i128> %lhs, <2 x i128> %rhs) {
; GCN-LABEL: s_shl_v2i128_ss:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx16 s[4:19], s[4:5], 0x0
; GCN-NEXT:    v_mov_b32_e32 v10, 16
; GCN-NEXT:    v_mov_b32_e32 v8, 0
; GCN-NEXT:    v_mov_b32_e32 v11, 0
; GCN-NEXT:    v_mov_b32_e32 v9, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[12:13], 64
; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[14:15], 0
; GCN-NEXT:    s_sub_i32 s22, 64, s12
; GCN-NEXT:    s_sub_i32 s20, s12, 64
; GCN-NEXT:    s_lshr_b64 s[22:23], s[4:5], s22
; GCN-NEXT:    s_lshl_b64 s[24:25], s[6:7], s12
; GCN-NEXT:    s_lshl_b64 s[20:21], s[4:5], s20
; GCN-NEXT:    s_or_b64 s[22:23], s[24:25], s[22:23]
; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
; GCN-NEXT:    s_or_b64 s[0:1], s[12:13], s[14:15]
; GCN-NEXT:    v_mov_b32_e32 v0, s21
; GCN-NEXT:    v_mov_b32_e32 v1, s23
; GCN-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[0:1], 0
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GCN-NEXT:    v_mov_b32_e32 v1, s7
; GCN-NEXT:    v_cndmask_b32_e64 v3, v0, v1, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v0, s20
; GCN-NEXT:    v_mov_b32_e32 v1, s22
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GCN-NEXT:    v_mov_b32_e32 v1, s6
; GCN-NEXT:    v_cndmask_b32_e64 v2, v0, v1, s[0:1]
; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[16:17], 64
; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[18:19], 0
; GCN-NEXT:    s_sub_i32 s13, 64, s16
; GCN-NEXT:    s_sub_i32 s6, s16, 64
; GCN-NEXT:    s_lshr_b64 s[14:15], s[8:9], s13
; GCN-NEXT:    s_lshl_b64 s[20:21], s[10:11], s16
; GCN-NEXT:    s_lshl_b64 s[6:7], s[8:9], s6
; GCN-NEXT:    s_or_b64 s[14:15], s[20:21], s[14:15]
; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
; GCN-NEXT:    s_or_b64 s[2:3], s[16:17], s[18:19]
; GCN-NEXT:    v_mov_b32_e32 v0, s7
; GCN-NEXT:    v_mov_b32_e32 v1, s15
; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[2:3], 0
; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v1, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v1, s11
; GCN-NEXT:    v_cndmask_b32_e64 v7, v0, v1, s[2:3]
; GCN-NEXT:    v_mov_b32_e32 v0, s6
; GCN-NEXT:    v_mov_b32_e32 v1, s14
; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v1, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v1, s10
; GCN-NEXT:    v_cndmask_b32_e64 v6, v0, v1, s[2:3]
; GCN-NEXT:    s_lshl_b64 s[2:3], s[4:5], s12
; GCN-NEXT:    v_mov_b32_e32 v0, s3
; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v0, vcc
; GCN-NEXT:    v_mov_b32_e32 v0, s2
; GCN-NEXT:    s_lshl_b64 s[2:3], s[8:9], s16
; GCN-NEXT:    v_mov_b32_e32 v4, s3
; GCN-NEXT:    v_cndmask_b32_e64 v5, 0, v4, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v4, s2
; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, v4, s[0:1]
; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
; GCN-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
; GCN-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
; GCN-NEXT:    s_endpgm
  %shift = shl <2 x i128> %lhs, %rhs
  store <2 x i128> %shift, <2 x i128> addrspace(1)* null
  ret void
}

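; Uniform <2 x i128> logical right shift with both operands in SGPRs.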
define amdgpu_kernel void @s_lshr_v2i128_ss(<2 x i128> %lhs, <2 x i128> %rhs) {
; GCN-LABEL: s_lshr_v2i128_ss:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx16 s[4:19], s[4:5], 0x0
; GCN-NEXT:    v_mov_b32_e32 v10, 16
; GCN-NEXT:    v_mov_b32_e32 v8, 0
; GCN-NEXT:    v_mov_b32_e32 v11, 0
; GCN-NEXT:    v_mov_b32_e32 v9, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[12:13], 64
; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[14:15], 0
; GCN-NEXT:    s_sub_i32 s22, 64, s12
; GCN-NEXT:    s_sub_i32 s20, s12, 64
; GCN-NEXT:    s_lshl_b64 s[22:23], s[6:7], s22
; GCN-NEXT:    s_lshr_b64 s[24:25], s[4:5], s12
; GCN-NEXT:    s_lshr_b64 s[20:21], s[6:7], s20
; GCN-NEXT:    s_or_b64 s[22:23], s[24:25], s[22:23]
; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
; GCN-NEXT:    s_or_b64 s[0:1], s[12:13], s[14:15]
; GCN-NEXT:    v_mov_b32_e32 v0, s21
; GCN-NEXT:    v_mov_b32_e32 v1, s23
; GCN-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[0:1], 0
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GCN-NEXT:    v_mov_b32_e32 v1, s5
; GCN-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v0, s20
; GCN-NEXT:    v_mov_b32_e32 v2, s22
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GCN-NEXT:    v_mov_b32_e32 v2, s4
; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[16:17], 64
; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[18:19], 0
; GCN-NEXT:    s_sub_i32 s13, 64, s16
; GCN-NEXT:    s_sub_i32 s4, s16, 64
; GCN-NEXT:    s_lshl_b64 s[14:15], s[10:11], s13
; GCN-NEXT:    s_lshr_b64 s[20:21], s[8:9], s16
; GCN-NEXT:    s_lshr_b64 s[4:5], s[10:11], s4
; GCN-NEXT:    s_or_b64 s[14:15], s[20:21], s[14:15]
; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
; GCN-NEXT:    s_or_b64 s[2:3], s[16:17], s[18:19]
; GCN-NEXT:    v_mov_b32_e32 v2, s5
; GCN-NEXT:    v_mov_b32_e32 v3, s15
; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[2:3], 0
; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v3, s9
; GCN-NEXT:    v_cndmask_b32_e64 v5, v2, v3, s[2:3]
; GCN-NEXT:    v_mov_b32_e32 v2, s4
; GCN-NEXT:    v_mov_b32_e32 v3, s14
; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v3, s8
; GCN-NEXT:    v_cndmask_b32_e64 v4, v2, v3, s[2:3]
; GCN-NEXT:    s_lshr_b64 s[2:3], s[6:7], s12
; GCN-NEXT:    v_mov_b32_e32 v2, s3
; GCN-NEXT:    v_cndmask_b32_e32 v3, 0, v2, vcc
; GCN-NEXT:    v_mov_b32_e32 v2, s2
; GCN-NEXT:    s_lshr_b64 s[2:3], s[10:11], s16
; GCN-NEXT:    v_mov_b32_e32 v6, s3
; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, v6, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v6, s2
; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, v6, s[0:1]
; GCN-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
; GCN-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
; GCN-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
; GCN-NEXT:    s_endpgm
  %shift = lshr <2 x i128> %lhs, %rhs
  store <2 x i128> %shift, <2 x i128> addrspace(1)* null
  ret void
}

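; Uniform <2 x i128> arithmetic right shift with both operands in SGPRs.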
define amdgpu_kernel void @s_ashr_v2i128_ss(<2 x i128> %lhs, <2 x i128> %rhs) {
; GCN-LABEL: s_ashr_v2i128_ss:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx16 s[4:19], s[4:5], 0x0
; GCN-NEXT:    v_mov_b32_e32 v10, 16
; GCN-NEXT:    v_mov_b32_e32 v8, 0
; GCN-NEXT:    v_mov_b32_e32 v11, 0
; GCN-NEXT:    v_mov_b32_e32 v9, 0
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[12:13], 64
; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[14:15], 0
; GCN-NEXT:    s_sub_i32 s22, 64, s12
; GCN-NEXT:    s_sub_i32 s20, s12, 64
; GCN-NEXT:    s_lshl_b64 s[22:23], s[6:7], s22
; GCN-NEXT:    s_lshr_b64 s[24:25], s[4:5], s12
; GCN-NEXT:    s_ashr_i64 s[20:21], s[6:7], s20
; GCN-NEXT:    s_or_b64 s[22:23], s[24:25], s[22:23]
; GCN-NEXT:    s_and_b64 vcc, s[2:3], s[0:1]
; GCN-NEXT:    s_or_b64 s[0:1], s[12:13], s[14:15]
; GCN-NEXT:    v_mov_b32_e32 v0, s21
; GCN-NEXT:    v_mov_b32_e32 v1, s23
; GCN-NEXT:    v_cmp_eq_u64_e64 s[0:1], s[0:1], 0
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GCN-NEXT:    v_mov_b32_e32 v1, s5
; GCN-NEXT:    v_cndmask_b32_e64 v1, v0, v1, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v0, s20
; GCN-NEXT:    v_mov_b32_e32 v2, s22
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GCN-NEXT:    v_mov_b32_e32 v2, s4
; GCN-NEXT:    v_cndmask_b32_e64 v0, v0, v2, s[0:1]
; GCN-NEXT:    v_cmp_lt_u64_e64 s[0:1], s[16:17], 64
; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[18:19], 0
; GCN-NEXT:    s_sub_i32 s13, 64, s16
; GCN-NEXT:    s_sub_i32 s4, s16, 64
; GCN-NEXT:    s_lshl_b64 s[14:15], s[10:11], s13
; GCN-NEXT:    s_lshr_b64 s[20:21], s[8:9], s16
; GCN-NEXT:    s_ashr_i64 s[4:5], s[10:11], s4
; GCN-NEXT:    s_or_b64 s[14:15], s[20:21], s[14:15]
; GCN-NEXT:    s_and_b64 s[0:1], s[2:3], s[0:1]
; GCN-NEXT:    s_or_b64 s[2:3], s[16:17], s[18:19]
; GCN-NEXT:    v_mov_b32_e32 v2, s5
; GCN-NEXT:    v_mov_b32_e32 v3, s15
; GCN-NEXT:    v_cmp_eq_u64_e64 s[2:3], s[2:3], 0
; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v3, s9
; GCN-NEXT:    v_cndmask_b32_e64 v5, v2, v3, s[2:3]
; GCN-NEXT:    v_mov_b32_e32 v2, s4
; GCN-NEXT:    v_mov_b32_e32 v3, s14
; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v3, s[0:1]
; GCN-NEXT:    v_mov_b32_e32 v3, s8
; GCN-NEXT:    v_cndmask_b32_e64 v4, v2, v3, s[2:3]
; GCN-NEXT:    s_ashr_i32 s4, s7, 31
; GCN-NEXT:    s_ashr_i64 s[2:3], s[6:7], s12
; GCN-NEXT:    v_mov_b32_e32 v2, s4
; GCN-NEXT:    v_mov_b32_e32 v3, s3
; GCN-NEXT:    v_mov_b32_e32 v6, s2
; GCN-NEXT:    s_ashr_i32 s4, s11, 31
; GCN-NEXT:    s_ashr_i64 s[2:3], s[10:11], s16
; GCN-NEXT:    v_cndmask_b32_e32 v3, v2, v3, vcc
; GCN-NEXT:    v_cndmask_b32_e32 v2, v2, v6, vcc
; GCN-NEXT:    v_mov_b32_e32 v6, s4
; GCN-NEXT:    v_mov_b32_e32 v7, s3
; GCN-NEXT:    v_mov_b32_e32 v12, s2
; GCN-NEXT:    v_cndmask_b32_e64 v7, v6, v7, s[0:1]
; GCN-NEXT:    v_cndmask_b32_e64 v6, v6, v12, s[0:1]
; GCN-NEXT:    flat_store_dwordx4 v[10:11], v[4:7]
; GCN-NEXT:    flat_store_dwordx4 v[8:9], v[0:3]
; GCN-NEXT:    s_endpgm
  %shift = ashr <2 x i128> %lhs, %rhs
  store <2 x i128> %shift, <2 x i128> addrspace(1)* null
  ret void
}