; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IF %s

; For RV64F, fcvt.l.s is semantically equivalent to fcvt.w.s in this case
; because fptosi will produce poison if the result doesn't fit into an i32.
define i32 @fcvt_w_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:    ret
  %1 = fptosi float %a to i32
  ret i32 %1
}

; For RV64F, fcvt.lu.s is semantically equivalent to fcvt.wu.s in this case
; because fptoui will produce poison if the result doesn't fit into an i32.
define i32 @fcvt_wu_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.wu.s a0, ft0, rtz
; RV64IF-NEXT:    ret
  %1 = fptoui float %a to i32
  ret i32 %1
}

define i32 @fmv_x_w(float %a, float %b) nounwind {
; RV32IF-LABEL: fmv_x_w:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.w.x ft0, a1
; RV32IF-NEXT:    fmv.w.x ft1, a0
; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fmv_x_w:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a1
; RV64IF-NEXT:    fmv.w.x ft1, a0
; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
; Ensure fmv.x.w is generated even for a soft float calling convention
  %1 = fadd float %a, %b
  %2 = bitcast float %1 to i32
  ret i32 %2
}

define float @fcvt_s_w(i32 %a) nounwind {
; RV32IF-LABEL: fcvt_s_w:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = sitofp i32 %a to float
  ret float %1
}

define float @fcvt_s_wu(i32 %a) nounwind {
; RV32IF-LABEL: fcvt_s_wu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = uitofp i32 %a to float
  ret float %1
}

define float @fmv_w_x(i32 %a, i32 %b) nounwind {
; RV32IF-LABEL: fmv_w_x:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fmv.w.x ft1, a1
; RV32IF-NEXT:    fadd.s ft0, ft0, ft1
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fmv_w_x:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fmv.w.x ft1, a1
; RV64IF-NEXT:    fadd.s ft0, ft0, ft1
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
; Ensure fmv.w.x is generated even for a soft float calling convention
  %1 = bitcast i32 %a to float
  %2 = bitcast i32 %b to float
  %3 = fadd float %1, %2
  ret float %3
}

define i64 @fcvt_l_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_l_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_l_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:    ret
  %1 = fptosi float %a to i64
  ret i64 %1
}

define i64 @fcvt_lu_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_lu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_lu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
; RV64IF-NEXT:    ret
  %1 = fptoui float %a to i64
  ret i64 %1
}

define float @fcvt_s_l(i64 %a) nounwind {
; RV32IF-LABEL: fcvt_s_l:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatdisf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_l:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.l ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = sitofp i64 %a to float
  ret float %1
}

define float @fcvt_s_lu(i64 %a) nounwind {
; RV32IF-LABEL: fcvt_s_lu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatundisf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_lu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.lu ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = uitofp i64 %a to float
  ret float %1
}

define float @fcvt_s_w_i8(i8 signext %a) nounwind {
; RV32IF-LABEL: fcvt_s_w_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = sitofp i8 %a to float
  ret float %1
}

define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
; RV32IF-LABEL: fcvt_s_wu_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = uitofp i8 %a to float
  ret float %1
}

define float @fcvt_s_w_i16(i16 signext %a) nounwind {
; RV32IF-LABEL: fcvt_s_w_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = sitofp i16 %a to float
  ret float %1
}

define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
; RV32IF-LABEL: fcvt_s_wu_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu ft0, a0
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu ft0, a0
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
  %1 = uitofp i16 %a to float
  ret float %1
}
