; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+m,+d,+zfh,+experimental-zvfh,+v -verify-machineinstrs < %s | FileCheck %s
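; Tests lowering of llvm.vector.extract of scalable subvectors at various
; element offsets, including i1 mask vectors and f16 vectors that need
; widening.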

define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m4
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
  ret <vscale x 4 x i32> %c
}

define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv4i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
  ret <vscale x 4 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m4
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 2)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 6)
  ret <vscale x 2 x i32> %c
}

define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m8
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 8 x i32> %c
}

define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv8i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
  ret <vscale x 8 x i32> %c
}

define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m8
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 4 x i32> %c
}

define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
  ret <vscale x 4 x i32> %c
}

define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
  ret <vscale x 4 x i32> %c
}

define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
  ret <vscale x 4 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m8
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 6)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v12
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 10)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v14
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v15
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 14)
  ret <vscale x 2 x i32> %c
}

define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m8
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 1 x i32> %c
}

define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_1(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 1)
  ret <vscale x 1 x i32> %c
}

define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_3(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 3)
  ret <vscale x 1 x i32> %c
}

define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_15(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v15, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 15)
  ret <vscale x 1 x i32> %c
}

define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
  ret <vscale x 1 x i32> %c
}

define <vscale x 1 x i32> @extract_nxv2i32_nxv1i32_0(<vscale x 2 x i32> %vec) {
; CHECK-LABEL: extract_nxv2i32_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> %vec, i64 0)
  ret <vscale x 1 x i32> %c
}

define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_0(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m4
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 0)
  ret <vscale x 2 x i8> %c
}

define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_2(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 2)
  ret <vscale x 2 x i8> %c
}

define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_4(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 4)
  ret <vscale x 2 x i8> %c
}

define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_6(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 6)
  ret <vscale x 2 x i8> %c
}

define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_8(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 8)
  ret <vscale x 2 x i8> %c
}

define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_22(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_22:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 22)
  ret <vscale x 2 x i8> %c
}

define <vscale x 1 x i8> @extract_nxv8i8_nxv1i8_7(<vscale x 8 x i8> %vec) {
; CHECK-LABEL: extract_nxv8i8_nxv1i8_7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a1, a0, 3
; CHECK-NEXT:    sub a0, a0, a1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> %vec, i64 7)
  ret <vscale x 1 x i8> %c
}

define <vscale x 1 x i8> @extract_nxv4i8_nxv1i8_3(<vscale x 4 x i8> %vec) {
; CHECK-LABEL: extract_nxv4i8_nxv1i8_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    slli a1, a0, 1
; CHECK-NEXT:    add a0, a1, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 3)
  ret <vscale x 1 x i8> %c
}

define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_0(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m4
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 0)
  ret <vscale x 2 x half> %c
}

define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_2(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 2)
  ret <vscale x 2 x half> %c
}

define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_4(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 4)
  ret <vscale x 2 x half> %c
}

define <vscale x 8 x i1> @extract_nxv64i1_nxv8i1_0(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv8i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %mask, i64 0)
  ret <vscale x 8 x i1> %c
}

define <vscale x 8 x i1> @extract_nxv64i1_nxv8i1_8(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv8i1_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v0, v0, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %mask, i64 8)
  ret <vscale x 8 x i1> %c
}

define <vscale x 2 x i1> @extract_nxv64i1_nxv2i1_0(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv2i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %mask, i64 0)
  ret <vscale x 2 x i1> %c
}

define <vscale x 2 x i1> @extract_nxv64i1_nxv2i1_2(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv2i1_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %mask, i64 2)
  ret <vscale x 2 x i1> %c
}

define <vscale x 4 x i1> @extract_nxv4i1_nxv32i1_0(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv4i1_nxv32i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 0)
  ret <vscale x 4 x i1> %c
}

define <vscale x 4 x i1> @extract_nxv4i1_nxv32i1_4(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv4i1_nxv32i1_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 4)
  ret <vscale x 4 x i1> %c
}

define <vscale x 16 x i1> @extract_nxv16i1_nxv32i1_0(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv16i1_nxv32i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 0)
  ret <vscale x 16 x i1> %c
}

define <vscale x 16 x i1> @extract_nxv16i1_nxv32i1_16(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv16i1_nxv32i1_16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vslidedown.vx v0, v0, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 16)
  ret <vscale x 16 x i1> %c
}

;
; Extract f16 vector that needs widening from one that needs widening.
;
define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_0(<vscale x 12 x half> %in) {
; CHECK-LABEL: extract_nxv6f16_nxv12f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m4
; CHECK-NEXT:    ret
  %res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 0)
  ret <vscale x 6 x half> %res
}

define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_6(<vscale x 12 x half> %in) {
; CHECK-LABEL: extract_nxv6f16_nxv12f16_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vslidedown.vx v14, v10, a0
; CHECK-NEXT:    vslidedown.vx v12, v9, a0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vslideup.vi v13, v14, 0
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vslideup.vx v12, v10, a0
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 6)
  ret <vscale x 6 x half> %res
}

declare <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half>, i64)

declare <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 %idx)
declare <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> %vec, i64 %idx)

declare <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 %idx)

declare <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> %vec, i64 %idx)

declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 %idx)
declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 %idx)

declare <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)

declare <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 %idx)

declare <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %vec, i64 %idx)
declare <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %vec, i64 %idx)

declare <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %vec, i64 %idx)
declare <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %vec, i64 %idx)