1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=ilp32d \
3; RUN:     -verify-machineinstrs < %s | FileCheck %s
4; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -target-abi=lp64d \
5; RUN:     -verify-machineinstrs < %s | FileCheck %s
6
define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv1f16: expects a single vfneg.v under SEW=e16, LMUL=mf4.
  %vb = fneg <vscale x 1 x half> %va
  ret <vscale x 1 x half> %vb
}
16
define <vscale x 2 x half> @vfneg_vv_nxv2f16(<vscale x 2 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv2f16: expects a single vfneg.v under SEW=e16, LMUL=mf2.
  %vb = fneg <vscale x 2 x half> %va
  ret <vscale x 2 x half> %vb
}
26
define <vscale x 4 x half> @vfneg_vv_nxv4f16(<vscale x 4 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv4f16: expects a single vfneg.v under SEW=e16, LMUL=m1.
  %vb = fneg <vscale x 4 x half> %va
  ret <vscale x 4 x half> %vb
}
36
define <vscale x 8 x half> @vfneg_vv_nxv8f16(<vscale x 8 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv8f16: expects a single vfneg.v under SEW=e16, LMUL=m2.
  %vb = fneg <vscale x 8 x half> %va
  ret <vscale x 8 x half> %vb
}
46
define <vscale x 16 x half> @vfneg_vv_nxv16f16(<vscale x 16 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv16f16: expects a single vfneg.v under SEW=e16, LMUL=m4.
  %vb = fneg <vscale x 16 x half> %va
  ret <vscale x 16 x half> %vb
}
56
define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv32f16 (largest f16 type here): still one vfneg.v at LMUL=m8.
  %vb = fneg <vscale x 32 x half> %va
  ret <vscale x 32 x half> %vb
}
66
define <vscale x 1 x float> @vfneg_vv_nxv1f32(<vscale x 1 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv1f32: expects a single vfneg.v under SEW=e32, LMUL=mf2.
  %vb = fneg <vscale x 1 x float> %va
  ret <vscale x 1 x float> %vb
}
76
define <vscale x 2 x float> @vfneg_vv_nxv2f32(<vscale x 2 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv2f32: expects a single vfneg.v under SEW=e32, LMUL=m1.
  %vb = fneg <vscale x 2 x float> %va
  ret <vscale x 2 x float> %vb
}
86
define <vscale x 4 x float> @vfneg_vv_nxv4f32(<vscale x 4 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv4f32: expects a single vfneg.v under SEW=e32, LMUL=m2.
  %vb = fneg <vscale x 4 x float> %va
  ret <vscale x 4 x float> %vb
}
96
define <vscale x 8 x float> @vfneg_vv_nxv8f32(<vscale x 8 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv8f32: expects a single vfneg.v under SEW=e32, LMUL=m4.
  %vb = fneg <vscale x 8 x float> %va
  ret <vscale x 8 x float> %vb
}
106
define <vscale x 16 x float> @vfneg_vv_nxv16f32(<vscale x 16 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv16f32 (largest f32 type here): still one vfneg.v at LMUL=m8.
  %vb = fneg <vscale x 16 x float> %va
  ret <vscale x 16 x float> %vb
}
116
define <vscale x 1 x double> @vfneg_vv_nxv1f64(<vscale x 1 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv1f64: expects a single vfneg.v under SEW=e64, LMUL=m1.
  %vb = fneg <vscale x 1 x double> %va
  ret <vscale x 1 x double> %vb
}
126
define <vscale x 2 x double> @vfneg_vv_nxv2f64(<vscale x 2 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv2f64: expects a single vfneg.v under SEW=e64, LMUL=m2.
  %vb = fneg <vscale x 2 x double> %va
  ret <vscale x 2 x double> %vb
}
136
define <vscale x 4 x double> @vfneg_vv_nxv4f64(<vscale x 4 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv4f64: expects a single vfneg.v under SEW=e64, LMUL=m4.
  %vb = fneg <vscale x 4 x double> %va
  ret <vscale x 4 x double> %vb
}
146
define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT:    vfneg.v v8, v8
; CHECK-NEXT:    ret
; Scalable fneg on nxv8f64 (largest f64 type here): still one vfneg.v at LMUL=m8.
  %vb = fneg <vscale x 8 x double> %va
  ret <vscale x 8 x double> %vb
}
156