; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

; fptosi float -> i32: hard-float targets select fcvt.w.s with rtz;
; soft-float targets call __fixsfsi.
define i32 @fcvt_w_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfsi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i32
  ret i32 %1
}

; llvm.fptosi.sat.i32.f32: saturating float -> i32. With +f this is a
; feq.s NaN check around fcvt.w.s rtz; soft-float clamps via the
; __gesf2/__gtsf2/__unordsf2 compare libcalls around __fixsfsi (RV32I)
; or __fixsfdi (RV64I).
define i32 @fcvt_w_s_sat(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_sat:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    feq.s a0, fa0, fa0
; RV32IF-NEXT:    bnez a0, .LBB1_2
; RV32IF-NEXT:  # %bb.1: # %start
; RV32IF-NEXT:    li a0, 0
; RV32IF-NEXT:    ret
; RV32IF-NEXT:  .LBB1_2:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_sat:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    feq.s a0, fa0, fa0
; RV64IF-NEXT:    bnez a0, .LBB1_2
; RV64IF-NEXT:  # %bb.1: # %start
; RV64IF-NEXT:    li a0, 0
; RV64IF-NEXT:    ret
; RV64IF-NEXT:  .LBB1_2:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_sat:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    lui a1, 847872
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    li s1, 0
; RV32I-NEXT:    lui s4, 524288
; RV32I-NEXT:    lui s3, 524288
; RV32I-NEXT:    bltz s2, .LBB1_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:  .LBB1_2: # %start
; RV32I-NEXT:    lui a0, 323584
; RV32I-NEXT:    addi a1, a0, -1
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    bge s1, a0, .LBB1_4
; RV32I-NEXT:  # %bb.3:
; RV32I-NEXT:    addi s3, s4, -1
; RV32I-NEXT:  .LBB1_4: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    bne a0, s1, .LBB1_6
; RV32I-NEXT:  # %bb.5: # %start
; RV32I-NEXT:    mv s1, s3
; RV32I-NEXT:  .LBB1_6: # %start
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_sat:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    lui a1, 847872
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    li s1, 0
; RV64I-NEXT:    lui s4, 524288
; RV64I-NEXT:    lui s3, 524288
; RV64I-NEXT:    bltz s2, .LBB1_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s3, a0
; RV64I-NEXT:  .LBB1_2: # %start
; RV64I-NEXT:    lui a0, 323584
; RV64I-NEXT:    addiw a1, a0, -1
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    bge s1, a0, .LBB1_4
; RV64I-NEXT:  # %bb.3:
; RV64I-NEXT:    addiw s3, s4, -1
; RV64I-NEXT:  .LBB1_4: # %start
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    bne a0, s1, .LBB1_6
; RV64I-NEXT:  # %bb.5: # %start
; RV64I-NEXT:    mv s1, s3
; RV64I-NEXT:  .LBB1_6: # %start
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s4, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
start:
  %0 = tail call i32 @llvm.fptosi.sat.i32.f32(float %a)
  ret i32 %0
}
declare i32 @llvm.fptosi.sat.i32.f32(float)

; fptoui float -> i32: hard-float targets select fcvt.wu.s with rtz;
; soft-float targets call __fixunssfsi.
define i32 @fcvt_wu_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i32
  ret i32 %1
}

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
; RV32IF-LABEL: fcvt_wu_s_multiple_use:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a1, fa0, rtz
; RV32IF-NEXT:    li a0, 1
; RV32IF-NEXT:    beqz a1, .LBB3_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    mv a0, a1
; RV32IF-NEXT:  .LBB3_2:
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_multiple_use:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a1, fa0, rtz
; RV64IF-NEXT:    li a0, 1
; RV64IF-NEXT:    beqz a1, .LBB3_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    mv a0, a1
; RV64IF-NEXT:  .LBB3_2:
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_multiple_use:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, 1
; RV32I-NEXT:    beqz a1, .LBB3_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:  .LBB3_2:
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_multiple_use:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi@plt
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    beqz a1, .LBB3_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:  .LBB3_2:
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = fptoui float %x to i32
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}

; llvm.fptoui.sat.i32.f32: saturating float -> u32. With +f this is a
; feq.s NaN check around fcvt.wu.s rtz; soft-float clamps via
; __gesf2/__gtsf2 around __fixunssfsi (RV32I) or __fixunssfdi (RV64I).
define i32 @fcvt_wu_s_sat(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_sat:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    feq.s a0, fa0, fa0
; RV32IF-NEXT:    bnez a0, .LBB4_2
; RV32IF-NEXT:  # %bb.1: # %start
; RV32IF-NEXT:    li a0, 0
; RV32IF-NEXT:    ret
; RV32IF-NEXT:  .LBB4_2:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_sat:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    feq.s a0, fa0, fa0
; RV64IF-NEXT:    bnez a0, .LBB4_2
; RV64IF-NEXT:  # %bb.1: # %start
; RV64IF-NEXT:    li a0, 0
; RV64IF-NEXT:    ret
; RV64IF-NEXT:  .LBB4_2:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_sat:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    li s2, 0
; RV32I-NEXT:    bltz s1, .LBB4_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:  .LBB4_2: # %start
; RV32I-NEXT:    lui a0, 325632
; RV32I-NEXT:    addi a1, a0, -1
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, -1
; RV32I-NEXT:    bgtz a1, .LBB4_4
; RV32I-NEXT:  # %bb.3: # %start
; RV32I-NEXT:    mv a0, s2
; RV32I-NEXT:  .LBB4_4: # %start
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_sat:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    li s1, 0
; RV64I-NEXT:    bltz s2, .LBB4_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:  .LBB4_2: # %start
; RV64I-NEXT:    lui a0, 325632
; RV64I-NEXT:    addiw a1, a0, -1
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    blez a0, .LBB4_4
; RV64I-NEXT:  # %bb.3:
; RV64I-NEXT:    li a0, -1
; RV64I-NEXT:    srli s1, a0, 32
; RV64I-NEXT:  .LBB4_4: # %start
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
start:
  %0 = tail call i32 @llvm.fptoui.sat.i32.f32(float %a)
  ret i32 %0
}
declare i32 @llvm.fptoui.sat.i32.f32(float)

define i32 @fmv_x_w(float %a, float %b) nounwind {
; RV32IF-LABEL: fmv_x_w:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fadd.s ft0, fa0, fa1
; RV32IF-NEXT:    fmv.x.w a0, ft0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fmv_x_w:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fadd.s ft0, fa0, fa1
; RV64IF-NEXT:    fmv.x.w a0, ft0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fmv_x_w:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __addsf3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmv_x_w:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __addsf3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
; Ensure fmv.x.w is generated even for a soft float calling convention
  %1 = fadd float %a, %b
  %2 = bitcast float %1 to i32
  ret i32 %2
}

; sitofp i32 -> float: fcvt.s.w with +f; __floatsisf otherwise (RV64I
; inserts a sext.w to satisfy the libcall's i32 argument convention).
define float @fcvt_s_w(i32 %a) nounwind {
; RV32IF-LABEL: fcvt_s_w:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i32 %a to float
  ret float %1
}

; sitofp of a loaded i32: the lw feeds fcvt.s.w directly (no extra
; extension needed on RV64 since lw sign-extends).
define float @fcvt_s_w_load(i32* %p) nounwind {
; RV32IF-LABEL: fcvt_s_w_load:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lw a0, 0(a0)
; RV32IF-NEXT:    fcvt.s.w fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_load:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lw a0, 0(a0)
; RV64IF-NEXT:    fcvt.s.w fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, i32* %p
  %1 = sitofp i32 %a to float
  ret float %1
}

; uitofp i32 -> float: fcvt.s.wu with +f; __floatunsisf otherwise.
define float @fcvt_s_wu(i32 %a) nounwind {
; RV32IF-LABEL: fcvt_s_wu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = uitofp i32 %a to float
  ret float %1
}

; uitofp of a loaded i32: RV64IF uses lwu (zero-extending load) to feed
; fcvt.s.wu.
define float @fcvt_s_wu_load(i32* %p) nounwind {
; RV32IF-LABEL: fcvt_s_wu_load:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lw a0, 0(a0)
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_load:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lwu a0, 0(a0)
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, i32* %p
  %1 = uitofp i32 %a to float
  ret float %1
}

define float @fmv_w_x(i32 %a, i32 %b) nounwind {
; RV32IF-LABEL: fmv_w_x:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fmv.w.x ft0, a0
; RV32IF-NEXT:    fmv.w.x ft1, a1
; RV32IF-NEXT:    fadd.s fa0, ft0, ft1
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fmv_w_x:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fmv.w.x ft0, a0
; RV64IF-NEXT:    fmv.w.x ft1, a1
; RV64IF-NEXT:    fadd.s fa0, ft0, ft1
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fmv_w_x:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __addsf3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmv_w_x:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __addsf3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
; Ensure fmv.w.x is generated even for a soft float calling convention
  %1 = bitcast i32 %a to float
  %2 = bitcast i32 %b to float
  %3 = fadd float %1, %2
  ret float %3
}

; fptosi float -> i64: only RV64 with +f has a native fcvt.l.s; all
; other configurations call __fixsfdi.
define i64 @fcvt_l_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_l_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_l_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_l_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfdi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_l_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i64
  ret i64 %1
}

; llvm.fptosi.sat.i64.f32: saturating float -> i64. RV64IF has the
; compact feq.s + fcvt.l.s form; RV32IF must clamp the __fixsfdi result
; against constant-pool bounds; soft-float expands to compare libcalls
; for each result half.
define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IF-LABEL: fcvt_l_s_sat:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    lui a0, %hi(.LCPI12_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_0)(a0)
; RV32IF-NEXT:    fmv.s fs0, fa0
; RV32IF-NEXT:    fle.s s0, ft0, fa0
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    mv a2, a0
; RV32IF-NEXT:    bnez s0, .LBB12_2
; RV32IF-NEXT:  # %bb.1: # %start
; RV32IF-NEXT:    li a2, 0
; RV32IF-NEXT:  .LBB12_2: # %start
; RV32IF-NEXT:    lui a0, %hi(.LCPI12_1)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_1)(a0)
; RV32IF-NEXT:    flt.s a3, ft0, fs0
; RV32IF-NEXT:    li a0, -1
; RV32IF-NEXT:    beqz a3, .LBB12_9
; RV32IF-NEXT:  # %bb.3: # %start
; RV32IF-NEXT:    feq.s a2, fs0, fs0
; RV32IF-NEXT:    beqz a2, .LBB12_10
; RV32IF-NEXT:  .LBB12_4: # %start
; RV32IF-NEXT:    lui a4, 524288
; RV32IF-NEXT:    beqz s0, .LBB12_11
; RV32IF-NEXT:  .LBB12_5: # %start
; RV32IF-NEXT:    bnez a3, .LBB12_12
; RV32IF-NEXT:  .LBB12_6: # %start
; RV32IF-NEXT:    bnez a2, .LBB12_8
; RV32IF-NEXT:  .LBB12_7: # %start
; RV32IF-NEXT:    li a1, 0
; RV32IF-NEXT:  .LBB12_8: # %start
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
; RV32IF-NEXT:  .LBB12_9: # %start
; RV32IF-NEXT:    mv a0, a2
; RV32IF-NEXT:    feq.s a2, fs0, fs0
; RV32IF-NEXT:    bnez a2, .LBB12_4
; RV32IF-NEXT:  .LBB12_10: # %start
; RV32IF-NEXT:    li a0, 0
; RV32IF-NEXT:    lui a4, 524288
; RV32IF-NEXT:    bnez s0, .LBB12_5
; RV32IF-NEXT:  .LBB12_11: # %start
; RV32IF-NEXT:    lui a1, 524288
; RV32IF-NEXT:    beqz a3, .LBB12_6
; RV32IF-NEXT:  .LBB12_12:
; RV32IF-NEXT:    addi a1, a4, -1
; RV32IF-NEXT:    beqz a2, .LBB12_7
; RV32IF-NEXT:    j .LBB12_8
;
; RV64IF-LABEL: fcvt_l_s_sat:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    feq.s a0, fa0, fa0
; RV64IF-NEXT:    bnez a0, .LBB12_2
; RV64IF-NEXT:  # %bb.1: # %start
; RV64IF-NEXT:    li a0, 0
; RV64IF-NEXT:    ret
; RV64IF-NEXT:  .LBB12_2:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_l_s_sat:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    lui a1, 913408
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixsfdi@plt
; RV32I-NEXT:    mv s2, a1
; RV32I-NEXT:    li s1, 0
; RV32I-NEXT:    li s5, 0
; RV32I-NEXT:    bltz s3, .LBB12_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s5, a0
; RV32I-NEXT:  .LBB12_2: # %start
; RV32I-NEXT:    lui a0, 389120
; RV32I-NEXT:    addi s4, a0, -1
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s4
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    li s6, -1
; RV32I-NEXT:    blt s1, a0, .LBB12_4
; RV32I-NEXT:  # %bb.3: # %start
; RV32I-NEXT:    mv s6, s5
; RV32I-NEXT:  .LBB12_4: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    mv s3, s1
; RV32I-NEXT:    bne a0, s1, .LBB12_6
; RV32I-NEXT:  # %bb.5: # %start
; RV32I-NEXT:    mv s3, s6
; RV32I-NEXT:  .LBB12_6: # %start
; RV32I-NEXT:    lui a1, 913408
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    lui s6, 524288
; RV32I-NEXT:    lui s5, 524288
; RV32I-NEXT:    blt a0, s1, .LBB12_8
; RV32I-NEXT:  # %bb.7: # %start
; RV32I-NEXT:    mv s5, s2
; RV32I-NEXT:  .LBB12_8: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s4
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    bge s1, a0, .LBB12_10
; RV32I-NEXT:  # %bb.9:
; RV32I-NEXT:    addi s5, s6, -1
; RV32I-NEXT:  .LBB12_10: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    bne a0, s1, .LBB12_12
; RV32I-NEXT:  # %bb.11: # %start
; RV32I-NEXT:    mv s1, s5
; RV32I-NEXT:  .LBB12_12: # %start
; RV32I-NEXT:    mv a0, s3
; RV32I-NEXT:    mv a1, s1
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_l_s_sat:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    lui a1, 913408
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s3, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    li s1, 0
; RV64I-NEXT:    li s4, -1
; RV64I-NEXT:    bltz s3, .LBB12_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    j .LBB12_3
; RV64I-NEXT:  .LBB12_2:
; RV64I-NEXT:    slli s2, s4, 63
; RV64I-NEXT:  .LBB12_3: # %start
; RV64I-NEXT:    lui a0, 389120
; RV64I-NEXT:    addiw a1, a0, -1
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    bge s1, a0, .LBB12_5
; RV64I-NEXT:  # %bb.4:
; RV64I-NEXT:    srli s2, s4, 1
; RV64I-NEXT:  .LBB12_5: # %start
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    bne a0, s1, .LBB12_7
; RV64I-NEXT:  # %bb.6: # %start
; RV64I-NEXT:    mv s1, s2
; RV64I-NEXT:  .LBB12_7: # %start
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s4, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
start:
  %0 = tail call i64 @llvm.fptosi.sat.i64.f32(float %a)
  ret i64 %0
}
declare i64 @llvm.fptosi.sat.i64.f32(float)

; fptoui float -> i64: only RV64 with +f has a native fcvt.lu.s; all
; other configurations call __fixunssfdi.
define i64 @fcvt_lu_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_lu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_lu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_lu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfdi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_lu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i64
  ret i64 %1
}

839define i64 @fcvt_lu_s_sat(float %a) nounwind {
840; RV32IF-LABEL: fcvt_lu_s_sat:
841; RV32IF:       # %bb.0: # %start
842; RV32IF-NEXT:    addi sp, sp, -16
843; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
844; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
845; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
846; RV32IF-NEXT:    fmv.s fs0, fa0
847; RV32IF-NEXT:    fmv.w.x ft0, zero
848; RV32IF-NEXT:    fle.s s0, ft0, fa0
849; RV32IF-NEXT:    call __fixunssfdi@plt
850; RV32IF-NEXT:    mv a3, a0
851; RV32IF-NEXT:    bnez s0, .LBB14_2
852; RV32IF-NEXT:  # %bb.1: # %start
853; RV32IF-NEXT:    li a3, 0
854; RV32IF-NEXT:  .LBB14_2: # %start
855; RV32IF-NEXT:    lui a0, %hi(.LCPI14_0)
856; RV32IF-NEXT:    flw ft0, %lo(.LCPI14_0)(a0)
857; RV32IF-NEXT:    flt.s a4, ft0, fs0
858; RV32IF-NEXT:    li a2, -1
859; RV32IF-NEXT:    li a0, -1
860; RV32IF-NEXT:    beqz a4, .LBB14_7
861; RV32IF-NEXT:  # %bb.3: # %start
862; RV32IF-NEXT:    beqz s0, .LBB14_8
863; RV32IF-NEXT:  .LBB14_4: # %start
864; RV32IF-NEXT:    bnez a4, .LBB14_6
865; RV32IF-NEXT:  .LBB14_5: # %start
866; RV32IF-NEXT:    mv a2, a1
867; RV32IF-NEXT:  .LBB14_6: # %start
868; RV32IF-NEXT:    mv a1, a2
869; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
870; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
871; RV32IF-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
872; RV32IF-NEXT:    addi sp, sp, 16
873; RV32IF-NEXT:    ret
874; RV32IF-NEXT:  .LBB14_7: # %start
875; RV32IF-NEXT:    mv a0, a3
876; RV32IF-NEXT:    bnez s0, .LBB14_4
877; RV32IF-NEXT:  .LBB14_8: # %start
878; RV32IF-NEXT:    li a1, 0
879; RV32IF-NEXT:    beqz a4, .LBB14_5
880; RV32IF-NEXT:    j .LBB14_6
881;
882; RV64IF-LABEL: fcvt_lu_s_sat:
883; RV64IF:       # %bb.0: # %start
884; RV64IF-NEXT:    feq.s a0, fa0, fa0
885; RV64IF-NEXT:    bnez a0, .LBB14_2
886; RV64IF-NEXT:  # %bb.1: # %start
887; RV64IF-NEXT:    li a0, 0
888; RV64IF-NEXT:    ret
889; RV64IF-NEXT:  .LBB14_2:
890; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
891; RV64IF-NEXT:    ret
892;
893; RV32I-LABEL: fcvt_lu_s_sat:
894; RV32I:       # %bb.0: # %start
895; RV32I-NEXT:    addi sp, sp, -32
896; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
897; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
898; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
899; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
900; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
901; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
902; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
903; RV32I-NEXT:    mv s0, a0
904; RV32I-NEXT:    li a1, 0
905; RV32I-NEXT:    call __gesf2@plt
906; RV32I-NEXT:    mv s1, a0
907; RV32I-NEXT:    mv a0, s0
908; RV32I-NEXT:    call __fixunssfdi@plt
909; RV32I-NEXT:    mv s2, a1
910; RV32I-NEXT:    li s5, 0
911; RV32I-NEXT:    bltz s1, .LBB14_2
912; RV32I-NEXT:  # %bb.1: # %start
913; RV32I-NEXT:    mv s5, a0
914; RV32I-NEXT:  .LBB14_2: # %start
915; RV32I-NEXT:    lui a0, 391168
916; RV32I-NEXT:    addi s1, a0, -1
917; RV32I-NEXT:    mv a0, s0
918; RV32I-NEXT:    mv a1, s1
919; RV32I-NEXT:    call __gtsf2@plt
920; RV32I-NEXT:    li s3, -1
921; RV32I-NEXT:    li s4, -1
922; RV32I-NEXT:    bgtz a0, .LBB14_4
923; RV32I-NEXT:  # %bb.3: # %start
924; RV32I-NEXT:    mv s4, s5
925; RV32I-NEXT:  .LBB14_4: # %start
926; RV32I-NEXT:    mv a0, s0
927; RV32I-NEXT:    li a1, 0
928; RV32I-NEXT:    call __gesf2@plt
929; RV32I-NEXT:    li s5, 0
930; RV32I-NEXT:    bltz a0, .LBB14_6
931; RV32I-NEXT:  # %bb.5: # %start
932; RV32I-NEXT:    mv s5, s2
933; RV32I-NEXT:  .LBB14_6: # %start
934; RV32I-NEXT:    mv a0, s0
935; RV32I-NEXT:    mv a1, s1
936; RV32I-NEXT:    call __gtsf2@plt
937; RV32I-NEXT:    bgtz a0, .LBB14_8
938; RV32I-NEXT:  # %bb.7: # %start
939; RV32I-NEXT:    mv s3, s5
940; RV32I-NEXT:  .LBB14_8: # %start
941; RV32I-NEXT:    mv a0, s4
942; RV32I-NEXT:    mv a1, s3
943; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
944; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
945; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
946; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
947; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
948; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
949; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
950; RV32I-NEXT:    addi sp, sp, 32
951; RV32I-NEXT:    ret
952;
953; RV64I-LABEL: fcvt_lu_s_sat:
954; RV64I:       # %bb.0: # %start
955; RV64I-NEXT:    addi sp, sp, -32
956; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
957; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
958; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
959; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
960; RV64I-NEXT:    mv s0, a0
961; RV64I-NEXT:    li a1, 0
962; RV64I-NEXT:    call __gesf2@plt
963; RV64I-NEXT:    mv s1, a0
964; RV64I-NEXT:    mv a0, s0
965; RV64I-NEXT:    call __fixunssfdi@plt
966; RV64I-NEXT:    li s2, 0
967; RV64I-NEXT:    bltz s1, .LBB14_2
968; RV64I-NEXT:  # %bb.1: # %start
969; RV64I-NEXT:    mv s2, a0
970; RV64I-NEXT:  .LBB14_2: # %start
971; RV64I-NEXT:    lui a0, 391168
972; RV64I-NEXT:    addiw a1, a0, -1
973; RV64I-NEXT:    mv a0, s0
974; RV64I-NEXT:    call __gtsf2@plt
975; RV64I-NEXT:    mv a1, a0
976; RV64I-NEXT:    li a0, -1
977; RV64I-NEXT:    bgtz a1, .LBB14_4
978; RV64I-NEXT:  # %bb.3: # %start
979; RV64I-NEXT:    mv a0, s2
980; RV64I-NEXT:  .LBB14_4: # %start
981; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
982; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
983; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
984; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
985; RV64I-NEXT:    addi sp, sp, 32
986; RV64I-NEXT:    ret
987start:
988  %0 = tail call i64 @llvm.fptoui.sat.i64.f32(float %a)
989  ret i64 %0
990}
991declare i64 @llvm.fptoui.sat.i64.f32(float)
992
993define float @fcvt_s_l(i64 %a) nounwind {
994; RV32IF-LABEL: fcvt_s_l:
995; RV32IF:       # %bb.0:
996; RV32IF-NEXT:    addi sp, sp, -16
997; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
998; RV32IF-NEXT:    call __floatdisf@plt
999; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1000; RV32IF-NEXT:    addi sp, sp, 16
1001; RV32IF-NEXT:    ret
1002;
1003; RV64IF-LABEL: fcvt_s_l:
1004; RV64IF:       # %bb.0:
1005; RV64IF-NEXT:    fcvt.s.l fa0, a0
1006; RV64IF-NEXT:    ret
1007;
1008; RV32I-LABEL: fcvt_s_l:
1009; RV32I:       # %bb.0:
1010; RV32I-NEXT:    addi sp, sp, -16
1011; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1012; RV32I-NEXT:    call __floatdisf@plt
1013; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1014; RV32I-NEXT:    addi sp, sp, 16
1015; RV32I-NEXT:    ret
1016;
1017; RV64I-LABEL: fcvt_s_l:
1018; RV64I:       # %bb.0:
1019; RV64I-NEXT:    addi sp, sp, -16
1020; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1021; RV64I-NEXT:    call __floatdisf@plt
1022; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1023; RV64I-NEXT:    addi sp, sp, 16
1024; RV64I-NEXT:    ret
1025  %1 = sitofp i64 %a to float
1026  ret float %1
1027}
1028
1029define float @fcvt_s_lu(i64 %a) nounwind {
1030; RV32IF-LABEL: fcvt_s_lu:
1031; RV32IF:       # %bb.0:
1032; RV32IF-NEXT:    addi sp, sp, -16
1033; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1034; RV32IF-NEXT:    call __floatundisf@plt
1035; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1036; RV32IF-NEXT:    addi sp, sp, 16
1037; RV32IF-NEXT:    ret
1038;
1039; RV64IF-LABEL: fcvt_s_lu:
1040; RV64IF:       # %bb.0:
1041; RV64IF-NEXT:    fcvt.s.lu fa0, a0
1042; RV64IF-NEXT:    ret
1043;
1044; RV32I-LABEL: fcvt_s_lu:
1045; RV32I:       # %bb.0:
1046; RV32I-NEXT:    addi sp, sp, -16
1047; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
1048; RV32I-NEXT:    call __floatundisf@plt
1049; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
1050; RV32I-NEXT:    addi sp, sp, 16
1051; RV32I-NEXT:    ret
1052;
1053; RV64I-LABEL: fcvt_s_lu:
1054; RV64I:       # %bb.0:
1055; RV64I-NEXT:    addi sp, sp, -16
1056; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
1057; RV64I-NEXT:    call __floatundisf@plt
1058; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
1059; RV64I-NEXT:    addi sp, sp, 16
1060; RV64I-NEXT:    ret
1061  %1 = uitofp i64 %a to float
1062  ret float %1
1063}
1064
; sitofp i8 -> float. The signext parameter attribute guarantees the incoming
; value is already sign-extended, so the F configurations emit a bare
; fcvt.s.w with no extra extension; soft-float configurations call
; __floatsisf.
define float @fcvt_s_w_i8(i8 signext %a) nounwind {
; RV32IF-LABEL: fcvt_s_w_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i8 %a to float
  ret float %1
}
1096
; uitofp i8 -> float. The zeroext parameter attribute guarantees the incoming
; value is already zero-extended, so the F configurations emit a bare
; fcvt.s.wu; soft-float configurations call __floatunsisf.
define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
; RV32IF-LABEL: fcvt_s_wu_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = uitofp i8 %a to float
  ret float %1
}
1128
; sitofp i16 -> float. Same pattern as the i8 case: the signext attribute
; means no explicit extension is needed before fcvt.s.w; soft-float
; configurations call __floatsisf.
define float @fcvt_s_w_i16(i16 signext %a) nounwind {
; RV32IF-LABEL: fcvt_s_w_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.w fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.w fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i16 %a to float
  ret float %1
}
1160
; uitofp i16 -> float. Same pattern as the i8 case: the zeroext attribute
; means no explicit extension is needed before fcvt.s.wu; soft-float
; configurations call __floatunsisf.
define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
; RV32IF-LABEL: fcvt_s_wu_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = uitofp i16 %a to float
  ret float %1
}
1192
1193; Make sure we select W version of addi on RV64.
; The incremented value is both converted to float (stored through %1) and
; returned sign-extended, so on RV64 the add must be selected as addiw (the
; W form) to keep the returned i32 properly sign-extended. RV32 uses plain
; addi; soft-float configurations route the conversion through __floatsisf.
define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, float* %1) nounwind {
; RV32IF-LABEL: fcvt_s_w_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.w ft0, a0
; RV32IF-NEXT:    fsw ft0, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.w ft0, a0
; RV64IF-NEXT:    fsw ft0, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = sitofp i32 %3 to float
  store float %4, float* %1, align 4
  ret i32 %3
}
1249
1250; Make sure we select W version of addi on RV64.
; Unsigned counterpart of fcvt_s_w_demanded_bits: the incremented value feeds
; both uitofp (stored through %1) and the sign-extended i32 return, so RV64
; must select addiw rather than addi. The conversion uses fcvt.s.wu with F,
; or the __floatunsisf libcall without it.
define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, float* %1) nounwind {
; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.wu ft0, a0
; RV32IF-NEXT:    fsw ft0, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.wu ft0, a0
; RV64IF-NEXT:    fsw ft0, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = uitofp i32 %3 to float
  store float %4, float* %1, align 4
  ret i32 %3
}
1306
; fptosi float -> i16 (undefined on overflow, so no clamping is required).
; With F this is a single fcvt.w.s (RV32) / fcvt.l.s (RV64); without F it is
; the __fixsfsi (RV32) / __fixsfdi (RV64) libcall.
define signext i16 @fcvt_w_s_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i16
  ret i16 %1
}
1338
; Saturating fptosi float -> i16 via @llvm.fptosi.sat. With F: NaN is
; detected with feq.s (returning 0), then the input is clamped to the i16
; range with fmax.s/fmin.s against constant-pool bounds before converting.
; Without F: the clamp is open-coded with __gesf2/__gtsf2 comparisons around
; the __fixsfsi/__fixsfdi conversion (lui 1048568 = INT16_MIN, lui 8 - 1 =
; INT16_MAX), __unordsf2 supplies the NaN -> 0 case, and the result is
; sign-extended back to i16 with slli/srai.
define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_sat_i16:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    feq.s a0, fa0, fa0
; RV32IF-NEXT:    bnez a0, .LBB24_2
; RV32IF-NEXT:  # %bb.1: # %start
; RV32IF-NEXT:    li a0, 0
; RV32IF-NEXT:    ret
; RV32IF-NEXT:  .LBB24_2:
; RV32IF-NEXT:    lui a0, %hi(.LCPI24_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI24_0)(a0)
; RV32IF-NEXT:    lui a0, %hi(.LCPI24_1)
; RV32IF-NEXT:    flw ft1, %lo(.LCPI24_1)(a0)
; RV32IF-NEXT:    fmax.s ft0, fa0, ft0
; RV32IF-NEXT:    fmin.s ft0, ft0, ft1
; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_sat_i16:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    feq.s a0, fa0, fa0
; RV64IF-NEXT:    bnez a0, .LBB24_2
; RV64IF-NEXT:  # %bb.1: # %start
; RV64IF-NEXT:    li a0, 0
; RV64IF-NEXT:    ret
; RV64IF-NEXT:  .LBB24_2:
; RV64IF-NEXT:    lui a0, %hi(.LCPI24_0)
; RV64IF-NEXT:    flw ft0, %lo(.LCPI24_0)(a0)
; RV64IF-NEXT:    lui a0, %hi(.LCPI24_1)
; RV64IF-NEXT:    flw ft1, %lo(.LCPI24_1)(a0)
; RV64IF-NEXT:    fmax.s ft0, fa0, ft0
; RV64IF-NEXT:    fmin.s ft0, ft0, ft1
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_sat_i16:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    lui a1, 815104
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    li s1, 0
; RV32I-NEXT:    lui s3, 1048568
; RV32I-NEXT:    bltz s2, .LBB24_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:  .LBB24_2: # %start
; RV32I-NEXT:    lui a0, 290816
; RV32I-NEXT:    addi a1, a0, -512
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    bge s1, a0, .LBB24_4
; RV32I-NEXT:  # %bb.3:
; RV32I-NEXT:    lui a0, 8
; RV32I-NEXT:    addi s3, a0, -1
; RV32I-NEXT:  .LBB24_4: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    bne a0, s1, .LBB24_6
; RV32I-NEXT:  # %bb.5: # %start
; RV32I-NEXT:    mv s1, s3
; RV32I-NEXT:  .LBB24_6: # %start
; RV32I-NEXT:    slli a0, s1, 16
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_sat_i16:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    lui a1, 815104
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    li s1, 0
; RV64I-NEXT:    lui s3, 1048568
; RV64I-NEXT:    bltz s2, .LBB24_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s3, a0
; RV64I-NEXT:  .LBB24_2: # %start
; RV64I-NEXT:    lui a0, 290816
; RV64I-NEXT:    addiw a1, a0, -512
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    bge s1, a0, .LBB24_4
; RV64I-NEXT:  # %bb.3:
; RV64I-NEXT:    lui a0, 8
; RV64I-NEXT:    addiw s3, a0, -1
; RV64I-NEXT:  .LBB24_4: # %start
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    bne a0, s1, .LBB24_6
; RV64I-NEXT:  # %bb.5: # %start
; RV64I-NEXT:    mv s1, s3
; RV64I-NEXT:  .LBB24_6: # %start
; RV64I-NEXT:    slli a0, s1, 48
; RV64I-NEXT:    srai a0, a0, 48
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
start:
  %0 = tail call i16 @llvm.fptosi.sat.i16.f32(float %a)
  ret i16 %0
}
declare i16 @llvm.fptosi.sat.i16.f32(float)
1470
; fptoui float -> i16 (undefined on overflow, so no clamping is required).
; With F this is a single fcvt.wu.s (RV32) / fcvt.lu.s (RV64); without F it
; is the __fixunssfsi (RV32) / __fixunssfdi (RV64) libcall.
define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i16
  ret i16 %1
}
1502
; Saturating fptoui float -> i16 via @llvm.fptoui.sat. With F: the input is
; clamped between +0.0 (fmv.w.x zero + fmax.s) and a constant-pool upper
; bound (fmin.s) before converting — note no explicit NaN branch is emitted,
; unlike the signed variant. Without F: __gesf2/__gtsf2 comparisons select
; between 0, the __fixunssfsi/__fixunssfdi result, and 0xFFFF (lui 16 - 1),
; and the final "and" masks the result back to 16 bits for the zeroext
; return.
define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_sat_i16:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    lui a0, %hi(.LCPI26_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a0)
; RV32IF-NEXT:    fmv.w.x ft1, zero
; RV32IF-NEXT:    fmax.s ft1, fa0, ft1
; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_sat_i16:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    lui a0, %hi(.LCPI26_0)
; RV64IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a0)
; RV64IF-NEXT:    fmv.w.x ft1, zero
; RV64IF-NEXT:    fmax.s ft1, fa0, ft1
; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_sat_i16:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    li s2, 0
; RV32I-NEXT:    bltz s1, .LBB26_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:  .LBB26_2: # %start
; RV32I-NEXT:    lui a0, 292864
; RV32I-NEXT:    addi a1, a0, -256
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    lui a1, 16
; RV32I-NEXT:    addi a1, a1, -1
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    bgtz a0, .LBB26_4
; RV32I-NEXT:  # %bb.3: # %start
; RV32I-NEXT:    mv a2, s2
; RV32I-NEXT:  .LBB26_4: # %start
; RV32I-NEXT:    and a0, a2, a1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_sat_i16:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    li s2, 0
; RV64I-NEXT:    bltz s1, .LBB26_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:  .LBB26_2: # %start
; RV64I-NEXT:    lui a0, 292864
; RV64I-NEXT:    addiw a1, a0, -256
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    lui a1, 16
; RV64I-NEXT:    addiw a1, a1, -1
; RV64I-NEXT:    mv a2, a1
; RV64I-NEXT:    bgtz a0, .LBB26_4
; RV64I-NEXT:  # %bb.3: # %start
; RV64I-NEXT:    mv a2, s2
; RV64I-NEXT:  .LBB26_4: # %start
; RV64I-NEXT:    and a0, a2, a1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
start:
  %0 = tail call i16 @llvm.fptoui.sat.i16.f32(float %a)
  ret i16 %0
}
declare i16 @llvm.fptoui.sat.i16.f32(float)
1602
; fptosi float -> i8 (undefined on overflow). Same lowering shape as the
; i16 case: fcvt.w.s / fcvt.l.s with F, __fixsfsi / __fixsfdi without.
define signext i8 @fcvt_w_s_i8(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i8
  ret i8 %1
}
1634
; Saturating fptosi float -> i8 via @llvm.fptosi.sat. Same structure as the
; i16 variant: with F, feq.s handles NaN (returns 0) and fmax.s/fmin.s clamp
; against constant-pool bounds; without F, __gesf2/__gtsf2/__unordsf2 select
; between -128, the __fixsfsi/__fixsfdi result, 127, and 0 for NaN, with a
; final slli/srai pair sign-extending to i8.
define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_sat_i8:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    feq.s a0, fa0, fa0
; RV32IF-NEXT:    bnez a0, .LBB28_2
; RV32IF-NEXT:  # %bb.1: # %start
; RV32IF-NEXT:    li a0, 0
; RV32IF-NEXT:    ret
; RV32IF-NEXT:  .LBB28_2:
; RV32IF-NEXT:    lui a0, %hi(.LCPI28_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI28_0)(a0)
; RV32IF-NEXT:    lui a0, %hi(.LCPI28_1)
; RV32IF-NEXT:    flw ft1, %lo(.LCPI28_1)(a0)
; RV32IF-NEXT:    fmax.s ft0, fa0, ft0
; RV32IF-NEXT:    fmin.s ft0, ft0, ft1
; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_sat_i8:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    feq.s a0, fa0, fa0
; RV64IF-NEXT:    bnez a0, .LBB28_2
; RV64IF-NEXT:  # %bb.1: # %start
; RV64IF-NEXT:    li a0, 0
; RV64IF-NEXT:    ret
; RV64IF-NEXT:  .LBB28_2:
; RV64IF-NEXT:    lui a0, %hi(.LCPI28_0)
; RV64IF-NEXT:    flw ft0, %lo(.LCPI28_0)(a0)
; RV64IF-NEXT:    lui a0, %hi(.LCPI28_1)
; RV64IF-NEXT:    flw ft1, %lo(.LCPI28_1)(a0)
; RV64IF-NEXT:    fmax.s ft0, fa0, ft0
; RV64IF-NEXT:    fmin.s ft0, ft0, ft1
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_sat_i8:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    lui a1, 798720
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    li s1, 0
; RV32I-NEXT:    li s3, -128
; RV32I-NEXT:    bltz s2, .LBB28_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:  .LBB28_2: # %start
; RV32I-NEXT:    lui a1, 274400
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    li s2, 127
; RV32I-NEXT:    blt s1, a0, .LBB28_4
; RV32I-NEXT:  # %bb.3: # %start
; RV32I-NEXT:    mv s2, s3
; RV32I-NEXT:  .LBB28_4: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    bne a0, s1, .LBB28_6
; RV32I-NEXT:  # %bb.5: # %start
; RV32I-NEXT:    mv s1, s2
; RV32I-NEXT:  .LBB28_6: # %start
; RV32I-NEXT:    slli a0, s1, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_sat_i8:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    lui a1, 798720
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    li s1, 0
; RV64I-NEXT:    li s3, -128
; RV64I-NEXT:    bltz s2, .LBB28_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s3, a0
; RV64I-NEXT:  .LBB28_2: # %start
; RV64I-NEXT:    lui a1, 274400
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    li s2, 127
; RV64I-NEXT:    blt s1, a0, .LBB28_4
; RV64I-NEXT:  # %bb.3: # %start
; RV64I-NEXT:    mv s2, s3
; RV64I-NEXT:  .LBB28_4: # %start
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    bne a0, s1, .LBB28_6
; RV64I-NEXT:  # %bb.5: # %start
; RV64I-NEXT:    mv s1, s2
; RV64I-NEXT:  .LBB28_6: # %start
; RV64I-NEXT:    slli a0, s1, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
start:
  %0 = tail call i8 @llvm.fptosi.sat.i8.f32(float %a)
  ret i8 %0
}
declare i8 @llvm.fptosi.sat.i8.f32(float)
1764
; fptoui float -> i8 (undefined on overflow). Same lowering shape as the
; i16 case: fcvt.wu.s / fcvt.lu.s with F, __fixunssfsi / __fixunssfdi
; without.
define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i8
  ret i8 %1
}
1796
; Saturating float -> u8 via @llvm.fptoui.sat: NaN maps to 0, all other inputs
; clamp into [0, 255]. Expected lowering, per the checks below:
;   * +f targets: branchless clamp -- fmv.w.x zero, fmax.s against 0, fmin.s
;     against a constant-pool float (.LCPI30_0; presumably 255.0, pool contents
;     not visible here -- confirm when regenerating), then one fcvt.
;   * soft-float targets: __gesf2/__gtsf2 compare libcalls around
;     __fixunssf{si,di}, selecting 0 / 255 / the raw conversion, and a final
;     "andi a0, a1, 255" to satisfy the zeroext return.
; NOTE(review): the CHECK lines are autogenerated -- regenerate with
; utils/update_llc_test_checks.py instead of editing them by hand.
define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_sat_i8:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    lui a0, %hi(.LCPI30_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI30_0)(a0)
; RV32IF-NEXT:    fmv.w.x ft1, zero
; RV32IF-NEXT:    fmax.s ft1, fa0, ft1
; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_sat_i8:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    lui a0, %hi(.LCPI30_0)
; RV64IF-NEXT:    flw ft0, %lo(.LCPI30_0)(a0)
; RV64IF-NEXT:    fmv.w.x ft1, zero
; RV64IF-NEXT:    fmax.s ft1, fa0, ft1
; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_sat_i8:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    li s2, 0
; RV32I-NEXT:    bltz s1, .LBB30_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:  .LBB30_2: # %start
; RV32I-NEXT:    lui a1, 276464
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    li a1, 255
; RV32I-NEXT:    bgtz a0, .LBB30_4
; RV32I-NEXT:  # %bb.3: # %start
; RV32I-NEXT:    mv a1, s2
; RV32I-NEXT:  .LBB30_4: # %start
; RV32I-NEXT:    andi a0, a1, 255
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_sat_i8:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    li s2, 0
; RV64I-NEXT:    bltz s1, .LBB30_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:  .LBB30_2: # %start
; RV64I-NEXT:    lui a1, 276464
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    li a1, 255
; RV64I-NEXT:    bgtz a0, .LBB30_4
; RV64I-NEXT:  # %bb.3: # %start
; RV64I-NEXT:    mv a1, s2
; RV64I-NEXT:  .LBB30_4: # %start
; RV64I-NEXT:    andi a0, a1, 255
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
start:
  %0 = tail call i8 @llvm.fptoui.sat.i8.f32(float %a)
  ret i8 %0
}
1889declare i8 @llvm.fptoui.sat.i8.f32(float)
1890