1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
3; RUN:   | FileCheck -check-prefix=RV32IF %s
4; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
5; RUN:   | FileCheck -check-prefix=RV64IF %s
6; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
7; RUN:   | FileCheck -check-prefix=RV32I %s
8; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
9; RUN:   | FileCheck -check-prefix=RV64I %s
10
11; For RV64F, fcvt.l.s is semantically equivalent to fcvt.w.s in this case
12; because fptosi will produce poison if the result doesn't fit into an i32.
13define i32 @fcvt_w_s(float %a) nounwind {
14; RV32IF-LABEL: fcvt_w_s:
15; RV32IF:       # %bb.0:
16; RV32IF-NEXT:    fmv.w.x ft0, a0
17; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
18; RV32IF-NEXT:    ret
19;
20; RV64IF-LABEL: fcvt_w_s:
21; RV64IF:       # %bb.0:
22; RV64IF-NEXT:    fmv.w.x ft0, a0
23; RV64IF-NEXT:    fcvt.w.s a0, ft0, rtz
24; RV64IF-NEXT:    ret
25;
26; RV32I-LABEL: fcvt_w_s:
27; RV32I:       # %bb.0:
28; RV32I-NEXT:    addi sp, sp, -16
29; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
30; RV32I-NEXT:    call __fixsfsi@plt
31; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
32; RV32I-NEXT:    addi sp, sp, 16
33; RV32I-NEXT:    ret
34;
35; RV64I-LABEL: fcvt_w_s:
36; RV64I:       # %bb.0:
37; RV64I-NEXT:    addi sp, sp, -16
38; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
39; RV64I-NEXT:    call __fixsfsi@plt
40; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
41; RV64I-NEXT:    addi sp, sp, 16
42; RV64I-NEXT:    ret
43  %1 = fptosi float %a to i32
44  ret i32 %1
45}
46
47define i32 @fcvt_w_s_sat(float %a) nounwind {
48; RV32IF-LABEL: fcvt_w_s_sat:
49; RV32IF:       # %bb.0: # %start
50; RV32IF-NEXT:    fmv.w.x ft0, a0
51; RV32IF-NEXT:    feq.s a0, ft0, ft0
52; RV32IF-NEXT:    bnez a0, .LBB1_2
53; RV32IF-NEXT:  # %bb.1: # %start
54; RV32IF-NEXT:    li a0, 0
55; RV32IF-NEXT:    ret
56; RV32IF-NEXT:  .LBB1_2:
57; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
58; RV32IF-NEXT:    ret
59;
60; RV64IF-LABEL: fcvt_w_s_sat:
61; RV64IF:       # %bb.0: # %start
62; RV64IF-NEXT:    fmv.w.x ft0, a0
63; RV64IF-NEXT:    feq.s a0, ft0, ft0
64; RV64IF-NEXT:    bnez a0, .LBB1_2
65; RV64IF-NEXT:  # %bb.1: # %start
66; RV64IF-NEXT:    li a0, 0
67; RV64IF-NEXT:    ret
68; RV64IF-NEXT:  .LBB1_2:
69; RV64IF-NEXT:    fcvt.w.s a0, ft0, rtz
70; RV64IF-NEXT:    ret
71;
72; RV32I-LABEL: fcvt_w_s_sat:
73; RV32I:       # %bb.0: # %start
74; RV32I-NEXT:    addi sp, sp, -32
75; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
76; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
77; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
78; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
79; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
80; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
81; RV32I-NEXT:    mv s0, a0
82; RV32I-NEXT:    lui a1, 847872
83; RV32I-NEXT:    call __gesf2@plt
84; RV32I-NEXT:    mv s2, a0
85; RV32I-NEXT:    mv a0, s0
86; RV32I-NEXT:    call __fixsfsi@plt
87; RV32I-NEXT:    li s1, 0
88; RV32I-NEXT:    lui s4, 524288
89; RV32I-NEXT:    lui s3, 524288
90; RV32I-NEXT:    bltz s2, .LBB1_2
91; RV32I-NEXT:  # %bb.1: # %start
92; RV32I-NEXT:    mv s3, a0
93; RV32I-NEXT:  .LBB1_2: # %start
94; RV32I-NEXT:    lui a0, 323584
95; RV32I-NEXT:    addi a1, a0, -1
96; RV32I-NEXT:    mv a0, s0
97; RV32I-NEXT:    call __gtsf2@plt
98; RV32I-NEXT:    bge s1, a0, .LBB1_4
99; RV32I-NEXT:  # %bb.3:
100; RV32I-NEXT:    addi s3, s4, -1
101; RV32I-NEXT:  .LBB1_4: # %start
102; RV32I-NEXT:    mv a0, s0
103; RV32I-NEXT:    mv a1, s0
104; RV32I-NEXT:    call __unordsf2@plt
105; RV32I-NEXT:    bne a0, s1, .LBB1_6
106; RV32I-NEXT:  # %bb.5: # %start
107; RV32I-NEXT:    mv s1, s3
108; RV32I-NEXT:  .LBB1_6: # %start
109; RV32I-NEXT:    mv a0, s1
110; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
111; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
112; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
113; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
114; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
115; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
116; RV32I-NEXT:    addi sp, sp, 32
117; RV32I-NEXT:    ret
118;
119; RV64I-LABEL: fcvt_w_s_sat:
120; RV64I:       # %bb.0: # %start
121; RV64I-NEXT:    addi sp, sp, -48
122; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
123; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
124; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
125; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
126; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
127; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
128; RV64I-NEXT:    mv s0, a0
129; RV64I-NEXT:    lui a1, 847872
130; RV64I-NEXT:    call __gesf2@plt
131; RV64I-NEXT:    mv s2, a0
132; RV64I-NEXT:    mv a0, s0
133; RV64I-NEXT:    call __fixsfdi@plt
134; RV64I-NEXT:    li s1, 0
135; RV64I-NEXT:    lui s4, 524288
136; RV64I-NEXT:    lui s3, 524288
137; RV64I-NEXT:    bltz s2, .LBB1_2
138; RV64I-NEXT:  # %bb.1: # %start
139; RV64I-NEXT:    mv s3, a0
140; RV64I-NEXT:  .LBB1_2: # %start
141; RV64I-NEXT:    lui a0, 323584
142; RV64I-NEXT:    addiw a1, a0, -1
143; RV64I-NEXT:    mv a0, s0
144; RV64I-NEXT:    call __gtsf2@plt
145; RV64I-NEXT:    bge s1, a0, .LBB1_4
146; RV64I-NEXT:  # %bb.3:
147; RV64I-NEXT:    addiw s3, s4, -1
148; RV64I-NEXT:  .LBB1_4: # %start
149; RV64I-NEXT:    mv a0, s0
150; RV64I-NEXT:    mv a1, s0
151; RV64I-NEXT:    call __unordsf2@plt
152; RV64I-NEXT:    bne a0, s1, .LBB1_6
153; RV64I-NEXT:  # %bb.5: # %start
154; RV64I-NEXT:    mv s1, s3
155; RV64I-NEXT:  .LBB1_6: # %start
156; RV64I-NEXT:    mv a0, s1
157; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
158; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
159; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
160; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
161; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
162; RV64I-NEXT:    ld s4, 0(sp) # 8-byte Folded Reload
163; RV64I-NEXT:    addi sp, sp, 48
164; RV64I-NEXT:    ret
165start:
166  %0 = tail call i32 @llvm.fptosi.sat.i32.f32(float %a)
167  ret i32 %0
168}
169declare i32 @llvm.fptosi.sat.i32.f32(float)
170
171define i32 @fcvt_wu_s(float %a) nounwind {
172; RV32IF-LABEL: fcvt_wu_s:
173; RV32IF:       # %bb.0:
174; RV32IF-NEXT:    fmv.w.x ft0, a0
175; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
176; RV32IF-NEXT:    ret
177;
178; RV64IF-LABEL: fcvt_wu_s:
179; RV64IF:       # %bb.0:
180; RV64IF-NEXT:    fmv.w.x ft0, a0
181; RV64IF-NEXT:    fcvt.wu.s a0, ft0, rtz
182; RV64IF-NEXT:    ret
183;
184; RV32I-LABEL: fcvt_wu_s:
185; RV32I:       # %bb.0:
186; RV32I-NEXT:    addi sp, sp, -16
187; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
188; RV32I-NEXT:    call __fixunssfsi@plt
189; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
190; RV32I-NEXT:    addi sp, sp, 16
191; RV32I-NEXT:    ret
192;
193; RV64I-LABEL: fcvt_wu_s:
194; RV64I:       # %bb.0:
195; RV64I-NEXT:    addi sp, sp, -16
196; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
197; RV64I-NEXT:    call __fixunssfsi@plt
198; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
199; RV64I-NEXT:    addi sp, sp, 16
200; RV64I-NEXT:    ret
201  %1 = fptoui float %a to i32
202  ret i32 %1
203}
204
205; Test where the fptoui has multiple uses, one of which causes a sext to be
206; inserted on RV64.
207define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) {
208; RV32IF-LABEL: fcvt_wu_s_multiple_use:
209; RV32IF:       # %bb.0:
210; RV32IF-NEXT:    fmv.w.x ft0, a0
211; RV32IF-NEXT:    fcvt.wu.s a1, ft0, rtz
212; RV32IF-NEXT:    li a0, 1
213; RV32IF-NEXT:    beqz a1, .LBB3_2
214; RV32IF-NEXT:  # %bb.1:
215; RV32IF-NEXT:    mv a0, a1
216; RV32IF-NEXT:  .LBB3_2:
217; RV32IF-NEXT:    ret
218;
219; RV64IF-LABEL: fcvt_wu_s_multiple_use:
220; RV64IF:       # %bb.0:
221; RV64IF-NEXT:    fmv.w.x ft0, a0
222; RV64IF-NEXT:    fcvt.wu.s a1, ft0, rtz
223; RV64IF-NEXT:    li a0, 1
224; RV64IF-NEXT:    beqz a1, .LBB3_2
225; RV64IF-NEXT:  # %bb.1:
226; RV64IF-NEXT:    mv a0, a1
227; RV64IF-NEXT:  .LBB3_2:
228; RV64IF-NEXT:    ret
229;
230; RV32I-LABEL: fcvt_wu_s_multiple_use:
231; RV32I:       # %bb.0:
232; RV32I-NEXT:    addi sp, sp, -16
233; RV32I-NEXT:    .cfi_def_cfa_offset 16
234; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
235; RV32I-NEXT:    .cfi_offset ra, -4
236; RV32I-NEXT:    call __fixunssfsi@plt
237; RV32I-NEXT:    mv a1, a0
238; RV32I-NEXT:    li a0, 1
239; RV32I-NEXT:    beqz a1, .LBB3_2
240; RV32I-NEXT:  # %bb.1:
241; RV32I-NEXT:    mv a0, a1
242; RV32I-NEXT:  .LBB3_2:
243; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
244; RV32I-NEXT:    addi sp, sp, 16
245; RV32I-NEXT:    ret
246;
247; RV64I-LABEL: fcvt_wu_s_multiple_use:
248; RV64I:       # %bb.0:
249; RV64I-NEXT:    addi sp, sp, -16
250; RV64I-NEXT:    .cfi_def_cfa_offset 16
251; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
252; RV64I-NEXT:    .cfi_offset ra, -8
253; RV64I-NEXT:    call __fixunssfsi@plt
254; RV64I-NEXT:    mv a1, a0
255; RV64I-NEXT:    li a0, 1
256; RV64I-NEXT:    beqz a1, .LBB3_2
257; RV64I-NEXT:  # %bb.1:
258; RV64I-NEXT:    mv a0, a1
259; RV64I-NEXT:  .LBB3_2:
260; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
261; RV64I-NEXT:    addi sp, sp, 16
262; RV64I-NEXT:    ret
263  %a = fptoui float %x to i32
264  %b = icmp eq i32 %a, 0
265  %c = select i1 %b, i32 1, i32 %a
266  ret i32 %c
267}
268
269define i32 @fcvt_wu_s_sat(float %a) nounwind {
270; RV32IF-LABEL: fcvt_wu_s_sat:
271; RV32IF:       # %bb.0: # %start
272; RV32IF-NEXT:    fmv.w.x ft0, a0
273; RV32IF-NEXT:    feq.s a0, ft0, ft0
274; RV32IF-NEXT:    bnez a0, .LBB4_2
275; RV32IF-NEXT:  # %bb.1: # %start
276; RV32IF-NEXT:    li a0, 0
277; RV32IF-NEXT:    ret
278; RV32IF-NEXT:  .LBB4_2:
279; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
280; RV32IF-NEXT:    ret
281;
282; RV64IF-LABEL: fcvt_wu_s_sat:
283; RV64IF:       # %bb.0: # %start
284; RV64IF-NEXT:    fmv.w.x ft0, a0
285; RV64IF-NEXT:    feq.s a0, ft0, ft0
286; RV64IF-NEXT:    bnez a0, .LBB4_2
287; RV64IF-NEXT:  # %bb.1: # %start
288; RV64IF-NEXT:    li a0, 0
289; RV64IF-NEXT:    ret
290; RV64IF-NEXT:  .LBB4_2:
291; RV64IF-NEXT:    fcvt.wu.s a0, ft0, rtz
292; RV64IF-NEXT:    ret
293;
294; RV32I-LABEL: fcvt_wu_s_sat:
295; RV32I:       # %bb.0: # %start
296; RV32I-NEXT:    addi sp, sp, -16
297; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
298; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
299; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
300; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
301; RV32I-NEXT:    mv s0, a0
302; RV32I-NEXT:    li a1, 0
303; RV32I-NEXT:    call __gesf2@plt
304; RV32I-NEXT:    mv s1, a0
305; RV32I-NEXT:    mv a0, s0
306; RV32I-NEXT:    call __fixunssfsi@plt
307; RV32I-NEXT:    li s2, 0
308; RV32I-NEXT:    bltz s1, .LBB4_2
309; RV32I-NEXT:  # %bb.1: # %start
310; RV32I-NEXT:    mv s2, a0
311; RV32I-NEXT:  .LBB4_2: # %start
312; RV32I-NEXT:    lui a0, 325632
313; RV32I-NEXT:    addi a1, a0, -1
314; RV32I-NEXT:    mv a0, s0
315; RV32I-NEXT:    call __gtsf2@plt
316; RV32I-NEXT:    mv a1, a0
317; RV32I-NEXT:    li a0, -1
318; RV32I-NEXT:    bgtz a1, .LBB4_4
319; RV32I-NEXT:  # %bb.3: # %start
320; RV32I-NEXT:    mv a0, s2
321; RV32I-NEXT:  .LBB4_4: # %start
322; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
323; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
324; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
325; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
326; RV32I-NEXT:    addi sp, sp, 16
327; RV32I-NEXT:    ret
328;
329; RV64I-LABEL: fcvt_wu_s_sat:
330; RV64I:       # %bb.0: # %start
331; RV64I-NEXT:    addi sp, sp, -32
332; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
333; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
334; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
335; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
336; RV64I-NEXT:    mv s0, a0
337; RV64I-NEXT:    li a1, 0
338; RV64I-NEXT:    call __gesf2@plt
339; RV64I-NEXT:    mv s2, a0
340; RV64I-NEXT:    mv a0, s0
341; RV64I-NEXT:    call __fixunssfdi@plt
342; RV64I-NEXT:    li s1, 0
343; RV64I-NEXT:    bltz s2, .LBB4_2
344; RV64I-NEXT:  # %bb.1: # %start
345; RV64I-NEXT:    mv s1, a0
346; RV64I-NEXT:  .LBB4_2: # %start
347; RV64I-NEXT:    lui a0, 325632
348; RV64I-NEXT:    addiw a1, a0, -1
349; RV64I-NEXT:    mv a0, s0
350; RV64I-NEXT:    call __gtsf2@plt
351; RV64I-NEXT:    blez a0, .LBB4_4
352; RV64I-NEXT:  # %bb.3:
353; RV64I-NEXT:    li a0, -1
354; RV64I-NEXT:    srli s1, a0, 32
355; RV64I-NEXT:  .LBB4_4: # %start
356; RV64I-NEXT:    mv a0, s1
357; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
358; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
359; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
360; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
361; RV64I-NEXT:    addi sp, sp, 32
362; RV64I-NEXT:    ret
363start:
364  %0 = tail call i32 @llvm.fptoui.sat.i32.f32(float %a)
365  ret i32 %0
366}
367declare i32 @llvm.fptoui.sat.i32.f32(float)
368
369define i32 @fmv_x_w(float %a, float %b) nounwind {
370; RV32IF-LABEL: fmv_x_w:
371; RV32IF:       # %bb.0:
372; RV32IF-NEXT:    fmv.w.x ft0, a1
373; RV32IF-NEXT:    fmv.w.x ft1, a0
374; RV32IF-NEXT:    fadd.s ft0, ft1, ft0
375; RV32IF-NEXT:    fmv.x.w a0, ft0
376; RV32IF-NEXT:    ret
377;
378; RV64IF-LABEL: fmv_x_w:
379; RV64IF:       # %bb.0:
380; RV64IF-NEXT:    fmv.w.x ft0, a1
381; RV64IF-NEXT:    fmv.w.x ft1, a0
382; RV64IF-NEXT:    fadd.s ft0, ft1, ft0
383; RV64IF-NEXT:    fmv.x.w a0, ft0
384; RV64IF-NEXT:    ret
385;
386; RV32I-LABEL: fmv_x_w:
387; RV32I:       # %bb.0:
388; RV32I-NEXT:    addi sp, sp, -16
389; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
390; RV32I-NEXT:    call __addsf3@plt
391; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
392; RV32I-NEXT:    addi sp, sp, 16
393; RV32I-NEXT:    ret
394;
395; RV64I-LABEL: fmv_x_w:
396; RV64I:       # %bb.0:
397; RV64I-NEXT:    addi sp, sp, -16
398; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
399; RV64I-NEXT:    call __addsf3@plt
400; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
401; RV64I-NEXT:    addi sp, sp, 16
402; RV64I-NEXT:    ret
403; Ensure fmv.x.w is generated even for a soft float calling convention
404  %1 = fadd float %a, %b
405  %2 = bitcast float %1 to i32
406  ret i32 %2
407}
408
409define float @fcvt_s_w(i32 %a) nounwind {
410; RV32IF-LABEL: fcvt_s_w:
411; RV32IF:       # %bb.0:
412; RV32IF-NEXT:    fcvt.s.w ft0, a0
413; RV32IF-NEXT:    fmv.x.w a0, ft0
414; RV32IF-NEXT:    ret
415;
416; RV64IF-LABEL: fcvt_s_w:
417; RV64IF:       # %bb.0:
418; RV64IF-NEXT:    fcvt.s.w ft0, a0
419; RV64IF-NEXT:    fmv.x.w a0, ft0
420; RV64IF-NEXT:    ret
421;
422; RV32I-LABEL: fcvt_s_w:
423; RV32I:       # %bb.0:
424; RV32I-NEXT:    addi sp, sp, -16
425; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
426; RV32I-NEXT:    call __floatsisf@plt
427; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
428; RV32I-NEXT:    addi sp, sp, 16
429; RV32I-NEXT:    ret
430;
431; RV64I-LABEL: fcvt_s_w:
432; RV64I:       # %bb.0:
433; RV64I-NEXT:    addi sp, sp, -16
434; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
435; RV64I-NEXT:    sext.w a0, a0
436; RV64I-NEXT:    call __floatsisf@plt
437; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
438; RV64I-NEXT:    addi sp, sp, 16
439; RV64I-NEXT:    ret
440  %1 = sitofp i32 %a to float
441  ret float %1
442}
443
444define float @fcvt_s_w_load(i32* %p) nounwind {
445; RV32IF-LABEL: fcvt_s_w_load:
446; RV32IF:       # %bb.0:
447; RV32IF-NEXT:    lw a0, 0(a0)
448; RV32IF-NEXT:    fcvt.s.w ft0, a0
449; RV32IF-NEXT:    fmv.x.w a0, ft0
450; RV32IF-NEXT:    ret
451;
452; RV64IF-LABEL: fcvt_s_w_load:
453; RV64IF:       # %bb.0:
454; RV64IF-NEXT:    lw a0, 0(a0)
455; RV64IF-NEXT:    fcvt.s.w ft0, a0
456; RV64IF-NEXT:    fmv.x.w a0, ft0
457; RV64IF-NEXT:    ret
458;
459; RV32I-LABEL: fcvt_s_w_load:
460; RV32I:       # %bb.0:
461; RV32I-NEXT:    addi sp, sp, -16
462; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
463; RV32I-NEXT:    lw a0, 0(a0)
464; RV32I-NEXT:    call __floatsisf@plt
465; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
466; RV32I-NEXT:    addi sp, sp, 16
467; RV32I-NEXT:    ret
468;
469; RV64I-LABEL: fcvt_s_w_load:
470; RV64I:       # %bb.0:
471; RV64I-NEXT:    addi sp, sp, -16
472; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
473; RV64I-NEXT:    lw a0, 0(a0)
474; RV64I-NEXT:    call __floatsisf@plt
475; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
476; RV64I-NEXT:    addi sp, sp, 16
477; RV64I-NEXT:    ret
478  %a = load i32, i32* %p
479  %1 = sitofp i32 %a to float
480  ret float %1
481}
482
483define float @fcvt_s_wu(i32 %a) nounwind {
484; RV32IF-LABEL: fcvt_s_wu:
485; RV32IF:       # %bb.0:
486; RV32IF-NEXT:    fcvt.s.wu ft0, a0
487; RV32IF-NEXT:    fmv.x.w a0, ft0
488; RV32IF-NEXT:    ret
489;
490; RV64IF-LABEL: fcvt_s_wu:
491; RV64IF:       # %bb.0:
492; RV64IF-NEXT:    fcvt.s.wu ft0, a0
493; RV64IF-NEXT:    fmv.x.w a0, ft0
494; RV64IF-NEXT:    ret
495;
496; RV32I-LABEL: fcvt_s_wu:
497; RV32I:       # %bb.0:
498; RV32I-NEXT:    addi sp, sp, -16
499; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
500; RV32I-NEXT:    call __floatunsisf@plt
501; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
502; RV32I-NEXT:    addi sp, sp, 16
503; RV32I-NEXT:    ret
504;
505; RV64I-LABEL: fcvt_s_wu:
506; RV64I:       # %bb.0:
507; RV64I-NEXT:    addi sp, sp, -16
508; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
509; RV64I-NEXT:    sext.w a0, a0
510; RV64I-NEXT:    call __floatunsisf@plt
511; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
512; RV64I-NEXT:    addi sp, sp, 16
513; RV64I-NEXT:    ret
514  %1 = uitofp i32 %a to float
515  ret float %1
516}
517
518define float @fcvt_s_wu_load(i32* %p) nounwind {
519; RV32IF-LABEL: fcvt_s_wu_load:
520; RV32IF:       # %bb.0:
521; RV32IF-NEXT:    lw a0, 0(a0)
522; RV32IF-NEXT:    fcvt.s.wu ft0, a0
523; RV32IF-NEXT:    fmv.x.w a0, ft0
524; RV32IF-NEXT:    ret
525;
526; RV64IF-LABEL: fcvt_s_wu_load:
527; RV64IF:       # %bb.0:
528; RV64IF-NEXT:    lwu a0, 0(a0)
529; RV64IF-NEXT:    fcvt.s.wu ft0, a0
530; RV64IF-NEXT:    fmv.x.w a0, ft0
531; RV64IF-NEXT:    ret
532;
533; RV32I-LABEL: fcvt_s_wu_load:
534; RV32I:       # %bb.0:
535; RV32I-NEXT:    addi sp, sp, -16
536; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
537; RV32I-NEXT:    lw a0, 0(a0)
538; RV32I-NEXT:    call __floatunsisf@plt
539; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
540; RV32I-NEXT:    addi sp, sp, 16
541; RV32I-NEXT:    ret
542;
543; RV64I-LABEL: fcvt_s_wu_load:
544; RV64I:       # %bb.0:
545; RV64I-NEXT:    addi sp, sp, -16
546; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
547; RV64I-NEXT:    lw a0, 0(a0)
548; RV64I-NEXT:    call __floatunsisf@plt
549; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
550; RV64I-NEXT:    addi sp, sp, 16
551; RV64I-NEXT:    ret
552  %a = load i32, i32* %p
553  %1 = uitofp i32 %a to float
554  ret float %1
555}
556
557define float @fmv_w_x(i32 %a, i32 %b) nounwind {
558; RV32IF-LABEL: fmv_w_x:
559; RV32IF:       # %bb.0:
560; RV32IF-NEXT:    fmv.w.x ft0, a0
561; RV32IF-NEXT:    fmv.w.x ft1, a1
562; RV32IF-NEXT:    fadd.s ft0, ft0, ft1
563; RV32IF-NEXT:    fmv.x.w a0, ft0
564; RV32IF-NEXT:    ret
565;
566; RV64IF-LABEL: fmv_w_x:
567; RV64IF:       # %bb.0:
568; RV64IF-NEXT:    fmv.w.x ft0, a0
569; RV64IF-NEXT:    fmv.w.x ft1, a1
570; RV64IF-NEXT:    fadd.s ft0, ft0, ft1
571; RV64IF-NEXT:    fmv.x.w a0, ft0
572; RV64IF-NEXT:    ret
573;
574; RV32I-LABEL: fmv_w_x:
575; RV32I:       # %bb.0:
576; RV32I-NEXT:    addi sp, sp, -16
577; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
578; RV32I-NEXT:    call __addsf3@plt
579; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
580; RV32I-NEXT:    addi sp, sp, 16
581; RV32I-NEXT:    ret
582;
583; RV64I-LABEL: fmv_w_x:
584; RV64I:       # %bb.0:
585; RV64I-NEXT:    addi sp, sp, -16
586; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
587; RV64I-NEXT:    call __addsf3@plt
588; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
589; RV64I-NEXT:    addi sp, sp, 16
590; RV64I-NEXT:    ret
591; Ensure fmv.w.x is generated even for a soft float calling convention
592  %1 = bitcast i32 %a to float
593  %2 = bitcast i32 %b to float
594  %3 = fadd float %1, %2
595  ret float %3
596}
597
598define i64 @fcvt_l_s(float %a) nounwind {
599; RV32IF-LABEL: fcvt_l_s:
600; RV32IF:       # %bb.0:
601; RV32IF-NEXT:    addi sp, sp, -16
602; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
603; RV32IF-NEXT:    call __fixsfdi@plt
604; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
605; RV32IF-NEXT:    addi sp, sp, 16
606; RV32IF-NEXT:    ret
607;
608; RV64IF-LABEL: fcvt_l_s:
609; RV64IF:       # %bb.0:
610; RV64IF-NEXT:    fmv.w.x ft0, a0
611; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
612; RV64IF-NEXT:    ret
613;
614; RV32I-LABEL: fcvt_l_s:
615; RV32I:       # %bb.0:
616; RV32I-NEXT:    addi sp, sp, -16
617; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
618; RV32I-NEXT:    call __fixsfdi@plt
619; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
620; RV32I-NEXT:    addi sp, sp, 16
621; RV32I-NEXT:    ret
622;
623; RV64I-LABEL: fcvt_l_s:
624; RV64I:       # %bb.0:
625; RV64I-NEXT:    addi sp, sp, -16
626; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
627; RV64I-NEXT:    call __fixsfdi@plt
628; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
629; RV64I-NEXT:    addi sp, sp, 16
630; RV64I-NEXT:    ret
631  %1 = fptosi float %a to i64
632  ret i64 %1
633}
634
635define i64 @fcvt_l_s_sat(float %a) nounwind {
636; RV32IF-LABEL: fcvt_l_s_sat:
637; RV32IF:       # %bb.0: # %start
638; RV32IF-NEXT:    addi sp, sp, -16
639; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
640; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
641; RV32IF-NEXT:    lui a1, %hi(.LCPI12_0)
642; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_0)(a1)
643; RV32IF-NEXT:    fmv.w.x ft1, a0
644; RV32IF-NEXT:    fsw ft1, 4(sp) # 4-byte Folded Spill
645; RV32IF-NEXT:    fle.s s0, ft0, ft1
646; RV32IF-NEXT:    call __fixsfdi@plt
647; RV32IF-NEXT:    mv a2, a0
648; RV32IF-NEXT:    bnez s0, .LBB12_2
649; RV32IF-NEXT:  # %bb.1: # %start
650; RV32IF-NEXT:    li a2, 0
651; RV32IF-NEXT:  .LBB12_2: # %start
652; RV32IF-NEXT:    lui a0, %hi(.LCPI12_1)
653; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_1)(a0)
654; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
655; RV32IF-NEXT:    flt.s a3, ft0, ft1
656; RV32IF-NEXT:    fmv.s ft0, ft1
657; RV32IF-NEXT:    li a0, -1
658; RV32IF-NEXT:    beqz a3, .LBB12_9
659; RV32IF-NEXT:  # %bb.3: # %start
660; RV32IF-NEXT:    feq.s a2, ft0, ft0
661; RV32IF-NEXT:    beqz a2, .LBB12_10
662; RV32IF-NEXT:  .LBB12_4: # %start
663; RV32IF-NEXT:    lui a4, 524288
664; RV32IF-NEXT:    beqz s0, .LBB12_11
665; RV32IF-NEXT:  .LBB12_5: # %start
666; RV32IF-NEXT:    bnez a3, .LBB12_12
667; RV32IF-NEXT:  .LBB12_6: # %start
668; RV32IF-NEXT:    bnez a2, .LBB12_8
669; RV32IF-NEXT:  .LBB12_7: # %start
670; RV32IF-NEXT:    li a1, 0
671; RV32IF-NEXT:  .LBB12_8: # %start
672; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
673; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
674; RV32IF-NEXT:    addi sp, sp, 16
675; RV32IF-NEXT:    ret
676; RV32IF-NEXT:  .LBB12_9: # %start
677; RV32IF-NEXT:    mv a0, a2
678; RV32IF-NEXT:    feq.s a2, ft0, ft0
679; RV32IF-NEXT:    bnez a2, .LBB12_4
680; RV32IF-NEXT:  .LBB12_10: # %start
681; RV32IF-NEXT:    li a0, 0
682; RV32IF-NEXT:    lui a4, 524288
683; RV32IF-NEXT:    bnez s0, .LBB12_5
684; RV32IF-NEXT:  .LBB12_11: # %start
685; RV32IF-NEXT:    lui a1, 524288
686; RV32IF-NEXT:    beqz a3, .LBB12_6
687; RV32IF-NEXT:  .LBB12_12:
688; RV32IF-NEXT:    addi a1, a4, -1
689; RV32IF-NEXT:    beqz a2, .LBB12_7
690; RV32IF-NEXT:    j .LBB12_8
691;
692; RV64IF-LABEL: fcvt_l_s_sat:
693; RV64IF:       # %bb.0: # %start
694; RV64IF-NEXT:    fmv.w.x ft0, a0
695; RV64IF-NEXT:    feq.s a0, ft0, ft0
696; RV64IF-NEXT:    bnez a0, .LBB12_2
697; RV64IF-NEXT:  # %bb.1: # %start
698; RV64IF-NEXT:    li a0, 0
699; RV64IF-NEXT:    ret
700; RV64IF-NEXT:  .LBB12_2:
701; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
702; RV64IF-NEXT:    ret
703;
704; RV32I-LABEL: fcvt_l_s_sat:
705; RV32I:       # %bb.0: # %start
706; RV32I-NEXT:    addi sp, sp, -32
707; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
708; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
709; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
710; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
711; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
712; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
713; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
714; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
715; RV32I-NEXT:    mv s0, a0
716; RV32I-NEXT:    lui a1, 913408
717; RV32I-NEXT:    call __gesf2@plt
718; RV32I-NEXT:    mv s3, a0
719; RV32I-NEXT:    mv a0, s0
720; RV32I-NEXT:    call __fixsfdi@plt
721; RV32I-NEXT:    mv s2, a1
722; RV32I-NEXT:    li s1, 0
723; RV32I-NEXT:    li s5, 0
724; RV32I-NEXT:    bltz s3, .LBB12_2
725; RV32I-NEXT:  # %bb.1: # %start
726; RV32I-NEXT:    mv s5, a0
727; RV32I-NEXT:  .LBB12_2: # %start
728; RV32I-NEXT:    lui a0, 389120
729; RV32I-NEXT:    addi s4, a0, -1
730; RV32I-NEXT:    mv a0, s0
731; RV32I-NEXT:    mv a1, s4
732; RV32I-NEXT:    call __gtsf2@plt
733; RV32I-NEXT:    li s6, -1
734; RV32I-NEXT:    blt s1, a0, .LBB12_4
735; RV32I-NEXT:  # %bb.3: # %start
736; RV32I-NEXT:    mv s6, s5
737; RV32I-NEXT:  .LBB12_4: # %start
738; RV32I-NEXT:    mv a0, s0
739; RV32I-NEXT:    mv a1, s0
740; RV32I-NEXT:    call __unordsf2@plt
741; RV32I-NEXT:    mv s3, s1
742; RV32I-NEXT:    bne a0, s1, .LBB12_6
743; RV32I-NEXT:  # %bb.5: # %start
744; RV32I-NEXT:    mv s3, s6
745; RV32I-NEXT:  .LBB12_6: # %start
746; RV32I-NEXT:    lui a1, 913408
747; RV32I-NEXT:    mv a0, s0
748; RV32I-NEXT:    call __gesf2@plt
749; RV32I-NEXT:    lui s6, 524288
750; RV32I-NEXT:    lui s5, 524288
751; RV32I-NEXT:    blt a0, s1, .LBB12_8
752; RV32I-NEXT:  # %bb.7: # %start
753; RV32I-NEXT:    mv s5, s2
754; RV32I-NEXT:  .LBB12_8: # %start
755; RV32I-NEXT:    mv a0, s0
756; RV32I-NEXT:    mv a1, s4
757; RV32I-NEXT:    call __gtsf2@plt
758; RV32I-NEXT:    bge s1, a0, .LBB12_10
759; RV32I-NEXT:  # %bb.9:
760; RV32I-NEXT:    addi s5, s6, -1
761; RV32I-NEXT:  .LBB12_10: # %start
762; RV32I-NEXT:    mv a0, s0
763; RV32I-NEXT:    mv a1, s0
764; RV32I-NEXT:    call __unordsf2@plt
765; RV32I-NEXT:    bne a0, s1, .LBB12_12
766; RV32I-NEXT:  # %bb.11: # %start
767; RV32I-NEXT:    mv s1, s5
768; RV32I-NEXT:  .LBB12_12: # %start
769; RV32I-NEXT:    mv a0, s3
770; RV32I-NEXT:    mv a1, s1
771; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
772; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
773; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
774; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
775; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
776; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
777; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
778; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
779; RV32I-NEXT:    addi sp, sp, 32
780; RV32I-NEXT:    ret
781;
782; RV64I-LABEL: fcvt_l_s_sat:
783; RV64I:       # %bb.0: # %start
784; RV64I-NEXT:    addi sp, sp, -48
785; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
786; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
787; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
788; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
789; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
790; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
791; RV64I-NEXT:    mv s0, a0
792; RV64I-NEXT:    lui a1, 913408
793; RV64I-NEXT:    call __gesf2@plt
794; RV64I-NEXT:    mv s3, a0
795; RV64I-NEXT:    mv a0, s0
796; RV64I-NEXT:    call __fixsfdi@plt
797; RV64I-NEXT:    li s1, 0
798; RV64I-NEXT:    li s4, -1
799; RV64I-NEXT:    bltz s3, .LBB12_2
800; RV64I-NEXT:  # %bb.1: # %start
801; RV64I-NEXT:    mv s2, a0
802; RV64I-NEXT:    j .LBB12_3
803; RV64I-NEXT:  .LBB12_2:
804; RV64I-NEXT:    slli s2, s4, 63
805; RV64I-NEXT:  .LBB12_3: # %start
806; RV64I-NEXT:    lui a0, 389120
807; RV64I-NEXT:    addiw a1, a0, -1
808; RV64I-NEXT:    mv a0, s0
809; RV64I-NEXT:    call __gtsf2@plt
810; RV64I-NEXT:    bge s1, a0, .LBB12_5
811; RV64I-NEXT:  # %bb.4:
812; RV64I-NEXT:    srli s2, s4, 1
813; RV64I-NEXT:  .LBB12_5: # %start
814; RV64I-NEXT:    mv a0, s0
815; RV64I-NEXT:    mv a1, s0
816; RV64I-NEXT:    call __unordsf2@plt
817; RV64I-NEXT:    bne a0, s1, .LBB12_7
818; RV64I-NEXT:  # %bb.6: # %start
819; RV64I-NEXT:    mv s1, s2
820; RV64I-NEXT:  .LBB12_7: # %start
821; RV64I-NEXT:    mv a0, s1
822; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
823; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
824; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
825; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
826; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
827; RV64I-NEXT:    ld s4, 0(sp) # 8-byte Folded Reload
828; RV64I-NEXT:    addi sp, sp, 48
829; RV64I-NEXT:    ret
830start:
831  %0 = tail call i64 @llvm.fptosi.sat.i64.f32(float %a)
832  ret i64 %0
833}
834declare i64 @llvm.fptosi.sat.i64.f32(float)
835
836define i64 @fcvt_lu_s(float %a) nounwind {
837; RV32IF-LABEL: fcvt_lu_s:
838; RV32IF:       # %bb.0:
839; RV32IF-NEXT:    addi sp, sp, -16
840; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
841; RV32IF-NEXT:    call __fixunssfdi@plt
842; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
843; RV32IF-NEXT:    addi sp, sp, 16
844; RV32IF-NEXT:    ret
845;
846; RV64IF-LABEL: fcvt_lu_s:
847; RV64IF:       # %bb.0:
848; RV64IF-NEXT:    fmv.w.x ft0, a0
849; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
850; RV64IF-NEXT:    ret
851;
852; RV32I-LABEL: fcvt_lu_s:
853; RV32I:       # %bb.0:
854; RV32I-NEXT:    addi sp, sp, -16
855; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
856; RV32I-NEXT:    call __fixunssfdi@plt
857; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
858; RV32I-NEXT:    addi sp, sp, 16
859; RV32I-NEXT:    ret
860;
861; RV64I-LABEL: fcvt_lu_s:
862; RV64I:       # %bb.0:
863; RV64I-NEXT:    addi sp, sp, -16
864; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
865; RV64I-NEXT:    call __fixunssfdi@plt
866; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
867; RV64I-NEXT:    addi sp, sp, 16
868; RV64I-NEXT:    ret
869  %1 = fptoui float %a to i64
870  ret i64 %1
871}
872
873define i64 @fcvt_lu_s_sat(float %a) nounwind {
874; RV32IF-LABEL: fcvt_lu_s_sat:
875; RV32IF:       # %bb.0: # %start
876; RV32IF-NEXT:    addi sp, sp, -16
877; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
878; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
879; RV32IF-NEXT:    fmv.w.x ft1, a0
880; RV32IF-NEXT:    fmv.w.x ft0, zero
881; RV32IF-NEXT:    fsw ft1, 4(sp) # 4-byte Folded Spill
882; RV32IF-NEXT:    fle.s s0, ft0, ft1
883; RV32IF-NEXT:    call __fixunssfdi@plt
884; RV32IF-NEXT:    mv a3, a0
885; RV32IF-NEXT:    bnez s0, .LBB14_2
886; RV32IF-NEXT:  # %bb.1: # %start
887; RV32IF-NEXT:    li a3, 0
888; RV32IF-NEXT:  .LBB14_2: # %start
889; RV32IF-NEXT:    lui a0, %hi(.LCPI14_0)
890; RV32IF-NEXT:    flw ft0, %lo(.LCPI14_0)(a0)
891; RV32IF-NEXT:    flw ft1, 4(sp) # 4-byte Folded Reload
892; RV32IF-NEXT:    flt.s a4, ft0, ft1
893; RV32IF-NEXT:    li a2, -1
894; RV32IF-NEXT:    li a0, -1
895; RV32IF-NEXT:    beqz a4, .LBB14_7
896; RV32IF-NEXT:  # %bb.3: # %start
897; RV32IF-NEXT:    beqz s0, .LBB14_8
898; RV32IF-NEXT:  .LBB14_4: # %start
899; RV32IF-NEXT:    bnez a4, .LBB14_6
900; RV32IF-NEXT:  .LBB14_5: # %start
901; RV32IF-NEXT:    mv a2, a1
902; RV32IF-NEXT:  .LBB14_6: # %start
903; RV32IF-NEXT:    mv a1, a2
904; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
905; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
906; RV32IF-NEXT:    addi sp, sp, 16
907; RV32IF-NEXT:    ret
908; RV32IF-NEXT:  .LBB14_7: # %start
909; RV32IF-NEXT:    mv a0, a3
910; RV32IF-NEXT:    bnez s0, .LBB14_4
911; RV32IF-NEXT:  .LBB14_8: # %start
912; RV32IF-NEXT:    li a1, 0
913; RV32IF-NEXT:    beqz a4, .LBB14_5
914; RV32IF-NEXT:    j .LBB14_6
915;
916; RV64IF-LABEL: fcvt_lu_s_sat:
917; RV64IF:       # %bb.0: # %start
918; RV64IF-NEXT:    fmv.w.x ft0, a0
919; RV64IF-NEXT:    feq.s a0, ft0, ft0
920; RV64IF-NEXT:    bnez a0, .LBB14_2
921; RV64IF-NEXT:  # %bb.1: # %start
922; RV64IF-NEXT:    li a0, 0
923; RV64IF-NEXT:    ret
924; RV64IF-NEXT:  .LBB14_2:
925; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
926; RV64IF-NEXT:    ret
927;
928; RV32I-LABEL: fcvt_lu_s_sat:
929; RV32I:       # %bb.0: # %start
930; RV32I-NEXT:    addi sp, sp, -32
931; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
932; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
933; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
934; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
935; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
936; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
937; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
938; RV32I-NEXT:    mv s0, a0
939; RV32I-NEXT:    li a1, 0
940; RV32I-NEXT:    call __gesf2@plt
941; RV32I-NEXT:    mv s1, a0
942; RV32I-NEXT:    mv a0, s0
943; RV32I-NEXT:    call __fixunssfdi@plt
944; RV32I-NEXT:    mv s2, a1
945; RV32I-NEXT:    li s5, 0
946; RV32I-NEXT:    bltz s1, .LBB14_2
947; RV32I-NEXT:  # %bb.1: # %start
948; RV32I-NEXT:    mv s5, a0
949; RV32I-NEXT:  .LBB14_2: # %start
950; RV32I-NEXT:    lui a0, 391168
951; RV32I-NEXT:    addi s1, a0, -1
952; RV32I-NEXT:    mv a0, s0
953; RV32I-NEXT:    mv a1, s1
954; RV32I-NEXT:    call __gtsf2@plt
955; RV32I-NEXT:    li s3, -1
956; RV32I-NEXT:    li s4, -1
957; RV32I-NEXT:    bgtz a0, .LBB14_4
958; RV32I-NEXT:  # %bb.3: # %start
959; RV32I-NEXT:    mv s4, s5
960; RV32I-NEXT:  .LBB14_4: # %start
961; RV32I-NEXT:    mv a0, s0
962; RV32I-NEXT:    li a1, 0
963; RV32I-NEXT:    call __gesf2@plt
964; RV32I-NEXT:    li s5, 0
965; RV32I-NEXT:    bltz a0, .LBB14_6
966; RV32I-NEXT:  # %bb.5: # %start
967; RV32I-NEXT:    mv s5, s2
968; RV32I-NEXT:  .LBB14_6: # %start
969; RV32I-NEXT:    mv a0, s0
970; RV32I-NEXT:    mv a1, s1
971; RV32I-NEXT:    call __gtsf2@plt
972; RV32I-NEXT:    bgtz a0, .LBB14_8
973; RV32I-NEXT:  # %bb.7: # %start
974; RV32I-NEXT:    mv s3, s5
975; RV32I-NEXT:  .LBB14_8: # %start
976; RV32I-NEXT:    mv a0, s4
977; RV32I-NEXT:    mv a1, s3
978; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
979; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
980; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
981; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
982; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
983; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
984; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
985; RV32I-NEXT:    addi sp, sp, 32
986; RV32I-NEXT:    ret
987;
988; RV64I-LABEL: fcvt_lu_s_sat:
989; RV64I:       # %bb.0: # %start
990; RV64I-NEXT:    addi sp, sp, -32
991; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
992; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
993; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
994; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
995; RV64I-NEXT:    mv s0, a0
996; RV64I-NEXT:    li a1, 0
997; RV64I-NEXT:    call __gesf2@plt
998; RV64I-NEXT:    mv s1, a0
999; RV64I-NEXT:    mv a0, s0
1000; RV64I-NEXT:    call __fixunssfdi@plt
1001; RV64I-NEXT:    li s2, 0
1002; RV64I-NEXT:    bltz s1, .LBB14_2
1003; RV64I-NEXT:  # %bb.1: # %start
1004; RV64I-NEXT:    mv s2, a0
1005; RV64I-NEXT:  .LBB14_2: # %start
1006; RV64I-NEXT:    lui a0, 391168
1007; RV64I-NEXT:    addiw a1, a0, -1
1008; RV64I-NEXT:    mv a0, s0
1009; RV64I-NEXT:    call __gtsf2@plt
1010; RV64I-NEXT:    mv a1, a0
1011; RV64I-NEXT:    li a0, -1
1012; RV64I-NEXT:    bgtz a1, .LBB14_4
1013; RV64I-NEXT:  # %bb.3: # %start
1014; RV64I-NEXT:    mv a0, s2
1015; RV64I-NEXT:  .LBB14_4: # %start
1016; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
1017; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
1018; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
1019; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
1020; RV64I-NEXT:    addi sp, sp, 32
1021; RV64I-NEXT:    ret
1022start:
1023  %0 = tail call i64 @llvm.fptoui.sat.i64.f32(float %a)
1024  ret i64 %0
1025}
1026declare i64 @llvm.fptoui.sat.i64.f32(float)
1027
1028define float @fcvt_s_l(i64 %a) nounwind {
1029; RV32IF-LABEL: fcvt_s_l:
1030; RV32IF:       # %bb.0:
1031; RV32IF-NEXT:    addi sp, sp, -16
1032; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1033; RV32IF-NEXT:    call __floatdisf@plt
1034; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1035; RV32IF-NEXT:    addi sp, sp, 16
1036; RV32IF-NEXT:    ret
1037;
1038; RV64IF-LABEL: fcvt_s_l:
1039; RV64IF:       # %bb.0:
1040; RV64IF-NEXT:    fcvt.s.l ft0, a0
1041; RV64IF-NEXT:    fmv.x.w a0, ft0
1042; RV64IF-NEXT:    ret
1043;
1044; RV32I-LABEL: fcvt_s_l:
1045; RV32I:       # %bb.0:
1046; RV32I-NEXT:    addi sp, sp, -16
1047; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1048; RV32I-NEXT:    call __floatdisf@plt
1049; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1050; RV32I-NEXT:    addi sp, sp, 16
1051; RV32I-NEXT:    ret
1052;
1053; RV64I-LABEL: fcvt_s_l:
1054; RV64I:       # %bb.0:
1055; RV64I-NEXT:    addi sp, sp, -16
1056; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1057; RV64I-NEXT:    call __floatdisf@plt
1058; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1059; RV64I-NEXT:    addi sp, sp, 16
1060; RV64I-NEXT:    ret
1061  %1 = sitofp i64 %a to float
1062  ret float %1
1063}
1064
1065define float @fcvt_s_lu(i64 %a) nounwind {
1066; RV32IF-LABEL: fcvt_s_lu:
1067; RV32IF:       # %bb.0:
1068; RV32IF-NEXT:    addi sp, sp, -16
1069; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1070; RV32IF-NEXT:    call __floatundisf@plt
1071; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1072; RV32IF-NEXT:    addi sp, sp, 16
1073; RV32IF-NEXT:    ret
1074;
1075; RV64IF-LABEL: fcvt_s_lu:
1076; RV64IF:       # %bb.0:
1077; RV64IF-NEXT:    fcvt.s.lu ft0, a0
1078; RV64IF-NEXT:    fmv.x.w a0, ft0
1079; RV64IF-NEXT:    ret
1080;
1081; RV32I-LABEL: fcvt_s_lu:
1082; RV32I:       # %bb.0:
1083; RV32I-NEXT:    addi sp, sp, -16
1084; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1085; RV32I-NEXT:    call __floatundisf@plt
1086; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1087; RV32I-NEXT:    addi sp, sp, 16
1088; RV32I-NEXT:    ret
1089;
1090; RV64I-LABEL: fcvt_s_lu:
1091; RV64I:       # %bb.0:
1092; RV64I-NEXT:    addi sp, sp, -16
1093; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1094; RV64I-NEXT:    call __floatundisf@plt
1095; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1096; RV64I-NEXT:    addi sp, sp, 16
1097; RV64I-NEXT:    ret
1098  %1 = uitofp i64 %a to float
1099  ret float %1
1100}
1101
1102define float @fcvt_s_w_i8(i8 signext %a) nounwind {
1103; RV32IF-LABEL: fcvt_s_w_i8:
1104; RV32IF:       # %bb.0:
1105; RV32IF-NEXT:    fcvt.s.w ft0, a0
1106; RV32IF-NEXT:    fmv.x.w a0, ft0
1107; RV32IF-NEXT:    ret
1108;
1109; RV64IF-LABEL: fcvt_s_w_i8:
1110; RV64IF:       # %bb.0:
1111; RV64IF-NEXT:    fcvt.s.w ft0, a0
1112; RV64IF-NEXT:    fmv.x.w a0, ft0
1113; RV64IF-NEXT:    ret
1114;
1115; RV32I-LABEL: fcvt_s_w_i8:
1116; RV32I:       # %bb.0:
1117; RV32I-NEXT:    addi sp, sp, -16
1118; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1119; RV32I-NEXT:    call __floatsisf@plt
1120; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1121; RV32I-NEXT:    addi sp, sp, 16
1122; RV32I-NEXT:    ret
1123;
1124; RV64I-LABEL: fcvt_s_w_i8:
1125; RV64I:       # %bb.0:
1126; RV64I-NEXT:    addi sp, sp, -16
1127; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1128; RV64I-NEXT:    call __floatsisf@plt
1129; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1130; RV64I-NEXT:    addi sp, sp, 16
1131; RV64I-NEXT:    ret
1132  %1 = sitofp i8 %a to float
1133  ret float %1
1134}
1135
1136define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
1137; RV32IF-LABEL: fcvt_s_wu_i8:
1138; RV32IF:       # %bb.0:
1139; RV32IF-NEXT:    fcvt.s.wu ft0, a0
1140; RV32IF-NEXT:    fmv.x.w a0, ft0
1141; RV32IF-NEXT:    ret
1142;
1143; RV64IF-LABEL: fcvt_s_wu_i8:
1144; RV64IF:       # %bb.0:
1145; RV64IF-NEXT:    fcvt.s.wu ft0, a0
1146; RV64IF-NEXT:    fmv.x.w a0, ft0
1147; RV64IF-NEXT:    ret
1148;
1149; RV32I-LABEL: fcvt_s_wu_i8:
1150; RV32I:       # %bb.0:
1151; RV32I-NEXT:    addi sp, sp, -16
1152; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1153; RV32I-NEXT:    call __floatunsisf@plt
1154; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1155; RV32I-NEXT:    addi sp, sp, 16
1156; RV32I-NEXT:    ret
1157;
1158; RV64I-LABEL: fcvt_s_wu_i8:
1159; RV64I:       # %bb.0:
1160; RV64I-NEXT:    addi sp, sp, -16
1161; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1162; RV64I-NEXT:    call __floatunsisf@plt
1163; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1164; RV64I-NEXT:    addi sp, sp, 16
1165; RV64I-NEXT:    ret
1166  %1 = uitofp i8 %a to float
1167  ret float %1
1168}
1169
1170define float @fcvt_s_w_i16(i16 signext %a) nounwind {
1171; RV32IF-LABEL: fcvt_s_w_i16:
1172; RV32IF:       # %bb.0:
1173; RV32IF-NEXT:    fcvt.s.w ft0, a0
1174; RV32IF-NEXT:    fmv.x.w a0, ft0
1175; RV32IF-NEXT:    ret
1176;
1177; RV64IF-LABEL: fcvt_s_w_i16:
1178; RV64IF:       # %bb.0:
1179; RV64IF-NEXT:    fcvt.s.w ft0, a0
1180; RV64IF-NEXT:    fmv.x.w a0, ft0
1181; RV64IF-NEXT:    ret
1182;
1183; RV32I-LABEL: fcvt_s_w_i16:
1184; RV32I:       # %bb.0:
1185; RV32I-NEXT:    addi sp, sp, -16
1186; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1187; RV32I-NEXT:    call __floatsisf@plt
1188; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1189; RV32I-NEXT:    addi sp, sp, 16
1190; RV32I-NEXT:    ret
1191;
1192; RV64I-LABEL: fcvt_s_w_i16:
1193; RV64I:       # %bb.0:
1194; RV64I-NEXT:    addi sp, sp, -16
1195; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1196; RV64I-NEXT:    call __floatsisf@plt
1197; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1198; RV64I-NEXT:    addi sp, sp, 16
1199; RV64I-NEXT:    ret
1200  %1 = sitofp i16 %a to float
1201  ret float %1
1202}
1203
1204define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
1205; RV32IF-LABEL: fcvt_s_wu_i16:
1206; RV32IF:       # %bb.0:
1207; RV32IF-NEXT:    fcvt.s.wu ft0, a0
1208; RV32IF-NEXT:    fmv.x.w a0, ft0
1209; RV32IF-NEXT:    ret
1210;
1211; RV64IF-LABEL: fcvt_s_wu_i16:
1212; RV64IF:       # %bb.0:
1213; RV64IF-NEXT:    fcvt.s.wu ft0, a0
1214; RV64IF-NEXT:    fmv.x.w a0, ft0
1215; RV64IF-NEXT:    ret
1216;
1217; RV32I-LABEL: fcvt_s_wu_i16:
1218; RV32I:       # %bb.0:
1219; RV32I-NEXT:    addi sp, sp, -16
1220; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1221; RV32I-NEXT:    call __floatunsisf@plt
1222; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1223; RV32I-NEXT:    addi sp, sp, 16
1224; RV32I-NEXT:    ret
1225;
1226; RV64I-LABEL: fcvt_s_wu_i16:
1227; RV64I:       # %bb.0:
1228; RV64I-NEXT:    addi sp, sp, -16
1229; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1230; RV64I-NEXT:    call __floatunsisf@plt
1231; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1232; RV64I-NEXT:    addi sp, sp, 16
1233; RV64I-NEXT:    ret
1234  %1 = uitofp i16 %a to float
1235  ret float %1
1236}
1237
1238; Make sure we select W version of addi on RV64.
1239define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, float* %1) {
1240; RV32IF-LABEL: fcvt_s_w_demanded_bits:
1241; RV32IF:       # %bb.0:
1242; RV32IF-NEXT:    addi a0, a0, 1
1243; RV32IF-NEXT:    fcvt.s.w ft0, a0
1244; RV32IF-NEXT:    fsw ft0, 0(a1)
1245; RV32IF-NEXT:    ret
1246;
1247; RV64IF-LABEL: fcvt_s_w_demanded_bits:
1248; RV64IF:       # %bb.0:
1249; RV64IF-NEXT:    addiw a0, a0, 1
1250; RV64IF-NEXT:    fcvt.s.w ft0, a0
1251; RV64IF-NEXT:    fsw ft0, 0(a1)
1252; RV64IF-NEXT:    ret
1253;
1254; RV32I-LABEL: fcvt_s_w_demanded_bits:
1255; RV32I:       # %bb.0:
1256; RV32I-NEXT:    addi sp, sp, -16
1257; RV32I-NEXT:    .cfi_def_cfa_offset 16
1258; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1259; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
1260; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
1261; RV32I-NEXT:    .cfi_offset ra, -4
1262; RV32I-NEXT:    .cfi_offset s0, -8
1263; RV32I-NEXT:    .cfi_offset s1, -12
1264; RV32I-NEXT:    mv s0, a1
1265; RV32I-NEXT:    addi s1, a0, 1
1266; RV32I-NEXT:    mv a0, s1
1267; RV32I-NEXT:    call __floatsisf@plt
1268; RV32I-NEXT:    sw a0, 0(s0)
1269; RV32I-NEXT:    mv a0, s1
1270; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1271; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
1272; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
1273; RV32I-NEXT:    addi sp, sp, 16
1274; RV32I-NEXT:    ret
1275;
1276; RV64I-LABEL: fcvt_s_w_demanded_bits:
1277; RV64I:       # %bb.0:
1278; RV64I-NEXT:    addi sp, sp, -32
1279; RV64I-NEXT:    .cfi_def_cfa_offset 32
1280; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
1281; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
1282; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
1283; RV64I-NEXT:    .cfi_offset ra, -8
1284; RV64I-NEXT:    .cfi_offset s0, -16
1285; RV64I-NEXT:    .cfi_offset s1, -24
1286; RV64I-NEXT:    mv s0, a1
1287; RV64I-NEXT:    addiw s1, a0, 1
1288; RV64I-NEXT:    mv a0, s1
1289; RV64I-NEXT:    call __floatsisf@plt
1290; RV64I-NEXT:    sw a0, 0(s0)
1291; RV64I-NEXT:    mv a0, s1
1292; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
1293; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
1294; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
1295; RV64I-NEXT:    addi sp, sp, 32
1296; RV64I-NEXT:    ret
1297  %3 = add i32 %0, 1
1298  %4 = sitofp i32 %3 to float
1299  store float %4, float* %1, align 4
1300  ret i32 %3
1301}
1302
1303; Make sure we select W version of addi on RV64.
1304define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, float* %1) {
1305; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
1306; RV32IF:       # %bb.0:
1307; RV32IF-NEXT:    addi a0, a0, 1
1308; RV32IF-NEXT:    fcvt.s.wu ft0, a0
1309; RV32IF-NEXT:    fsw ft0, 0(a1)
1310; RV32IF-NEXT:    ret
1311;
1312; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
1313; RV64IF:       # %bb.0:
1314; RV64IF-NEXT:    addiw a0, a0, 1
1315; RV64IF-NEXT:    fcvt.s.wu ft0, a0
1316; RV64IF-NEXT:    fsw ft0, 0(a1)
1317; RV64IF-NEXT:    ret
1318;
1319; RV32I-LABEL: fcvt_s_wu_demanded_bits:
1320; RV32I:       # %bb.0:
1321; RV32I-NEXT:    addi sp, sp, -16
1322; RV32I-NEXT:    .cfi_def_cfa_offset 16
1323; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1324; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
1325; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
1326; RV32I-NEXT:    .cfi_offset ra, -4
1327; RV32I-NEXT:    .cfi_offset s0, -8
1328; RV32I-NEXT:    .cfi_offset s1, -12
1329; RV32I-NEXT:    mv s0, a1
1330; RV32I-NEXT:    addi s1, a0, 1
1331; RV32I-NEXT:    mv a0, s1
1332; RV32I-NEXT:    call __floatunsisf@plt
1333; RV32I-NEXT:    sw a0, 0(s0)
1334; RV32I-NEXT:    mv a0, s1
1335; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1336; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
1337; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
1338; RV32I-NEXT:    addi sp, sp, 16
1339; RV32I-NEXT:    ret
1340;
1341; RV64I-LABEL: fcvt_s_wu_demanded_bits:
1342; RV64I:       # %bb.0:
1343; RV64I-NEXT:    addi sp, sp, -32
1344; RV64I-NEXT:    .cfi_def_cfa_offset 32
1345; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
1346; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
1347; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
1348; RV64I-NEXT:    .cfi_offset ra, -8
1349; RV64I-NEXT:    .cfi_offset s0, -16
1350; RV64I-NEXT:    .cfi_offset s1, -24
1351; RV64I-NEXT:    mv s0, a1
1352; RV64I-NEXT:    addiw s1, a0, 1
1353; RV64I-NEXT:    mv a0, s1
1354; RV64I-NEXT:    call __floatunsisf@plt
1355; RV64I-NEXT:    sw a0, 0(s0)
1356; RV64I-NEXT:    mv a0, s1
1357; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
1358; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
1359; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
1360; RV64I-NEXT:    addi sp, sp, 32
1361; RV64I-NEXT:    ret
1362  %3 = add i32 %0, 1
1363  %4 = uitofp i32 %3 to float
1364  store float %4, float* %1, align 4
1365  ret i32 %3
1366}
1367