1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
3; RUN:   -verify-machineinstrs | FileCheck %s
4; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
5; RUN:   -verify-machineinstrs | FileCheck %s
; Unmasked vmsof intrinsic: (source mask, AVL). Expected to lower to a
; single vmsof.m under an e8/mf8 vsetvli (see autogenerated CHECK lines).
declare <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsof_m_nxv1i1(<vscale x 1 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmsof.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(
    <vscale x 1 x i1> %0,
    iXLen %1)
  ret <vscale x 1 x i1> %a
}
23
; Masked vmsof intrinsic: (passthru/merge, source mask, v0 mask, AVL).
; Per the CHECK lines, %0 is preserved as the merge value (copied out of v0),
; %2 is moved into v0 as the execution mask, and %1 (v8) is the operand of
; the masked, tail-undisturbed vmsof.m.
declare <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  <vscale x 1 x i1>,
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsof.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i1> %1,
    <vscale x 1 x i1> %2,
    iXLen %3)
  ret <vscale x 1 x i1> %a
}
47
; Unmasked vmsof, nxv2i1: same pattern as nxv1i1 but selects e8/mf4.
declare <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsof_m_nxv2i1(<vscale x 2 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmsof.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.nxv2i1(
    <vscale x 2 x i1> %0,
    iXLen %1)
  ret <vscale x 2 x i1> %a
}
65
; Masked vmsof, nxv2i1: (passthru, source, v0 mask, AVL); selects e8/mf4, tu.
declare <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  <vscale x 2 x i1>,
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsof.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i1> %1,
    <vscale x 2 x i1> %2,
    iXLen %3)
  ret <vscale x 2 x i1> %a
}
89
; Unmasked vmsof, nxv4i1: selects e8/mf2.
declare <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsof_m_nxv4i1(<vscale x 4 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmsof.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.nxv4i1(
    <vscale x 4 x i1> %0,
    iXLen %1)
  ret <vscale x 4 x i1> %a
}
107
; Masked vmsof, nxv4i1: (passthru, source, v0 mask, AVL); selects e8/mf2, tu.
declare <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  <vscale x 4 x i1>,
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsof.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i1> %1,
    <vscale x 4 x i1> %2,
    iXLen %3)
  ret <vscale x 4 x i1> %a
}
131
; Unmasked vmsof, nxv8i1: selects e8/m1. Note the result copy is vmv.v.v
; (whole-element move under the active vtype) rather than vmv1r.v here.
declare <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsof_m_nxv8i1(<vscale x 8 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsof_m_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmsof.m v8, v0
; CHECK-NEXT:    vmv.v.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.nxv8i1(
    <vscale x 8 x i1> %0,
    iXLen %1)
  ret <vscale x 8 x i1> %a
}
149
; Masked vmsof, nxv8i1: (passthru, source, v0 mask, AVL); selects e8/m1, tu.
declare <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  <vscale x 8 x i1>,
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsof.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i1> %1,
    <vscale x 8 x i1> %2,
    iXLen %3)
  ret <vscale x 8 x i1> %a
}
173
; Unmasked vmsof, nxv16i1: selects e8/m2. Mask values still occupy a single
; register, so the result copy stays vmv1r.v.
declare <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsof_m_nxv16i1(<vscale x 16 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsof_m_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmsof.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.nxv16i1(
    <vscale x 16 x i1> %0,
    iXLen %1)
  ret <vscale x 16 x i1> %a
}
191
; Masked vmsof, nxv16i1: (passthru, source, v0 mask, AVL); selects e8/m2, tu.
declare <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  <vscale x 16 x i1>,
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsof.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i1> %1,
    <vscale x 16 x i1> %2,
    iXLen %3)
  ret <vscale x 16 x i1> %a
}
215
; Unmasked vmsof, nxv32i1: selects e8/m4.
declare <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsof_m_nxv32i1(<vscale x 32 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsof_m_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmsof.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.nxv32i1(
    <vscale x 32 x i1> %0,
    iXLen %1)
  ret <vscale x 32 x i1> %a
}
233
; Masked vmsof, nxv32i1: (passthru, source, v0 mask, AVL); selects e8/m4, tu.
declare <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  <vscale x 32 x i1>,
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsof.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i1> %1,
    <vscale x 32 x i1> %2,
    iXLen %3)
  ret <vscale x 32 x i1> %a
}
257
; Unmasked vmsof, nxv64i1: selects e8/m8 (largest LMUL covered by this file).
declare <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsof_m_nxv64i1(<vscale x 64 x i1> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vmsof_m_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vmsof.m v8, v0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.nxv64i1(
    <vscale x 64 x i1> %0,
    iXLen %1)
  ret <vscale x 64 x i1> %a
}
275
; Masked vmsof, nxv64i1: (passthru, source, v0 mask, AVL); selects e8/m8, tu.
declare <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  <vscale x 64 x i1>,
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsof.m v10, v8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
    <vscale x 64 x i1> %0,
    <vscale x 64 x i1> %1,
    <vscale x 64 x i1> %2,
    iXLen %3)
  ret <vscale x 64 x i1> %a
}
299