; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
; RUN:     -verify-machineinstrs < %s | FileCheck %s

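; Check lowering of fptrunc on scalable floating-point vectors: f32 -> f16
; at fractional and full LMULs, f64 -> f32, and the two-step f64 -> f16.
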
define <vscale x 1 x half> @vfptrunc_nxv1f32_nxv1f16(<vscale x 1 x float> %va) {
;
; CHECK-LABEL: vfptrunc_nxv1f32_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 1 x float> %va to <vscale x 1 x half>
  ret <vscale x 1 x half> %evec
}

define <vscale x 2 x half> @vfptrunc_nxv2f32_nxv2f16(<vscale x 2 x float> %va) {
;
; CHECK-LABEL: vfptrunc_nxv2f32_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 2 x float> %va to <vscale x 2 x half>
  ret <vscale x 2 x half> %evec
}

define <vscale x 4 x half> @vfptrunc_nxv4f32_nxv4f16(<vscale x 4 x float> %va) {
;
; CHECK-LABEL: vfptrunc_nxv4f32_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 4 x float> %va to <vscale x 4 x half>
  ret <vscale x 4 x half> %evec
}

define <vscale x 8 x half> @vfptrunc_nxv8f32_nxv8f16(<vscale x 8 x float> %va) {
;
; CHECK-LABEL: vfptrunc_nxv8f32_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 8 x float> %va to <vscale x 8 x half>
  ret <vscale x 8 x half> %evec
}

define <vscale x 16 x half> @vfptrunc_nxv16f32_nxv16f16(<vscale x 16 x float> %va) {
;
; CHECK-LABEL: vfptrunc_nxv16f32_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 16 x float> %va to <vscale x 16 x half>
  ret <vscale x 16 x half> %evec
}

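; A fptrunc from f64 to f16 cannot be done in a single instruction: RVV
; narrowing conversions only halve SEW. It is split into f64 -> f32 using
; vfncvt.rod.f.f.w (round-towards-odd), which keeps the intermediate result
; from introducing double rounding, followed by a normal f32 -> f16 narrow.
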
define <vscale x 1 x half> @vfptrunc_nxv1f64_nxv1f16(<vscale x 1 x double> %va) {
;
; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vfncvt.rod.f.f.w v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v8, v9
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 1 x double> %va to <vscale x 1 x half>
  ret <vscale x 1 x half> %evec
}

define <vscale x 1 x float> @vfptrunc_nxv1f64_nxv1f32(<vscale x 1 x double> %va) {
;
; CHECK-LABEL: vfptrunc_nxv1f64_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v9, v8
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 1 x double> %va to <vscale x 1 x float>
  ret <vscale x 1 x float> %evec
}

define <vscale x 2 x half> @vfptrunc_nxv2f64_nxv2f16(<vscale x 2 x double> %va) {
;
; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vfncvt.rod.f.f.w v10, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v8, v10
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 2 x double> %va to <vscale x 2 x half>
  ret <vscale x 2 x half> %evec
}

define <vscale x 2 x float> @vfptrunc_nxv2f64_nxv2f32(<vscale x 2 x double> %va) {
;
; CHECK-LABEL: vfptrunc_nxv2f64_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v10, v8
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 2 x double> %va to <vscale x 2 x float>
  ret <vscale x 2 x float> %evec
}

define <vscale x 4 x half> @vfptrunc_nxv4f64_nxv4f16(<vscale x 4 x double> %va) {
;
; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vfncvt.rod.f.f.w v12, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v8, v12
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 4 x double> %va to <vscale x 4 x half>
  ret <vscale x 4 x half> %evec
}

define <vscale x 4 x float> @vfptrunc_nxv4f64_nxv4f32(<vscale x 4 x double> %va) {
;
; CHECK-LABEL: vfptrunc_nxv4f64_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v12, v8
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 4 x double> %va to <vscale x 4 x float>
  ret <vscale x 4 x float> %evec
}

define <vscale x 8 x half> @vfptrunc_nxv8f64_nxv8f16(<vscale x 8 x double> %va) {
;
; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vfncvt.rod.f.f.w v16, v8
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v8, v16
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 8 x double> %va to <vscale x 8 x half>
  ret <vscale x 8 x half> %evec
}

define <vscale x 8 x float> @vfptrunc_nxv8f64_nxv8f32(<vscale x 8 x double> %va) {
;
; CHECK-LABEL: vfptrunc_nxv8f64_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v16, v8
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %evec = fptrunc <vscale x 8 x double> %va to <vscale x 8 x float>
  ret <vscale x 8 x float> %evec
}