; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=ilp32f \
; RUN:   | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation -target-abi=lp64f \
; RUN:   | FileCheck -check-prefix=RV64IF %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s

; NOTE: The rounding mode metadata does not affect which instruction is
; selected. The dynamic rounding mode is always used for operations that
; support a rounding mode.

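; Basic strict float-to-signed-i32 conversion. With the F extension this
; selects fcvt.w.s with the rtz rounding mode; without it, the conversion
; becomes a libcall to __fixsfsi.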
define i32 @fcvt_w_s(float %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_w_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfsi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict") strictfp
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)

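; The unsigned equivalent: fcvt.wu.s with the F extension, a libcall to
; __fixunssfsi without it.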
define i32 @fcvt_wu_s(float %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_wu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict") strictfp
  ret i32 %1
}
declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind strictfp {
; RV32IF-LABEL: fcvt_wu_s_multiple_use:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a1, fa0, rtz
; RV32IF-NEXT:    li a0, 1
; RV32IF-NEXT:    beqz a1, .LBB2_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    mv a0, a1
; RV32IF-NEXT:  .LBB2_2:
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_multiple_use:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a1, fa0, rtz
; RV64IF-NEXT:    li a0, 1
; RV64IF-NEXT:    beqz a1, .LBB2_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    mv a0, a1
; RV64IF-NEXT:  .LBB2_2:
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_multiple_use:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, 1
; RV32I-NEXT:    beqz a1, .LBB2_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:  .LBB2_2:
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_multiple_use:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi@plt
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    beqz a1, .LBB2_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:  .LBB2_2:
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") strictfp
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}

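; Strict signed i32-to-float conversion: fcvt.s.w with the F extension,
; __floatsisf without it. RV64I needs a sext.w to sign-extend the argument
; before the libcall.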
define float @fcvt_s_w(i32 %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_w:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata)

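; As above, but with the source value loaded from memory.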
define float @fcvt_s_w_load(i32* %p) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_w_load:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lw a0, 0(a0)
; RV32IF-NEXT:    fcvt.s.w fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_load:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lw a0, 0(a0)
; RV64IF-NEXT:    fcvt.s.w fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, i32* %p
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

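; Strict unsigned i32-to-float conversion: fcvt.s.wu with the F extension,
; __floatunsisf without it.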
define float @fcvt_s_wu(i32 %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_wu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i32(i32, metadata, metadata)

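; As above, but with the source value loaded from memory. RV64IF uses lwu
; so the value is zero-extended for fcvt.s.wu.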
define float @fcvt_s_wu_load(i32* %p) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_wu_load:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lw a0, 0(a0)
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_load:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lwu a0, 0(a0)
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, i32* %p
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}

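; i64 results do not fit a GPR on RV32, so float-to-i64 is a libcall
; (__fixsfdi) everywhere except RV64IF, which can use fcvt.l.s.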
define i64 @fcvt_l_s(float %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_l_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_l_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_l_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfdi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_l_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict") strictfp
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata)

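; The unsigned equivalent: __fixunssfdi everywhere except RV64IF, which can
; use fcvt.lu.s.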
define i64 @fcvt_lu_s(float %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_lu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_lu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_lu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfdi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_lu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict") strictfp
  ret i64 %1
}
declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata)

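; Likewise for i64-to-float: a libcall (__floatdisf) everywhere except
; RV64IF, which can use fcvt.s.l.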
define float @fcvt_s_l(i64 %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_l:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatdisf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_l:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.l fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_l:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatdisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_l:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatdisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata)

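; The unsigned equivalent: __floatundisf everywhere except RV64IF, which
; can use fcvt.s.lu.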
define float @fcvt_s_lu(i64 %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_lu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatundisf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_lu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.lu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_lu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatundisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_lu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatundisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata)

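; A signext i8 argument can feed fcvt.s.w directly; no further extension is
; needed.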
define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_w_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata)

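; A zeroext i8 argument can likewise feed fcvt.s.wu directly.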
define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_wu_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadata)

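; The same pattern holds for i16 sources: signext i16 feeds fcvt.s.w
; directly.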
define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_w_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata)

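; And zeroext i16 feeds fcvt.s.wu directly.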
define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_wu_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  ret float %1
}
declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata)

; Make sure we select the W version of addi on RV64.
define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, float* %1) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_w_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.w ft0, a0
; RV32IF-NEXT:    fsw ft0, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.w ft0, a0
; RV64IF-NEXT:    fsw ft0, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  store float %4, float* %1, align 4
  ret i32 %3
}

; Make sure we select the W version of addi on RV64.
define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, float* %1) nounwind strictfp {
; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.wu ft0, a0
; RV32IF-NEXT:    fsw ft0, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.wu ft0, a0
; RV64IF-NEXT:    fsw ft0, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp
  store float %4, float* %1, align 4
  ret i32 %3
}