; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32f | FileCheck -check-prefixes=CHECKIF,RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64f | FileCheck -check-prefixes=CHECKIF,RV64IF %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

; Plain fptosi float -> i32: fcvt.w.s (rtz) with the F extension, a
; __fixsfsi libcall in the soft-float configurations.
define i32 @fcvt_w_s(float %a) nounwind {
; CHECKIF-LABEL: fcvt_w_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.w.s a0, fa0, rtz
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfsi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i32
  ret i32 %1
}

; Saturating fptosi via @llvm.fptosi.sat.i32.f32: with F this is a feq.s NaN
; check guarding fcvt.w.s; soft-float expands to compiler-rt compare/convert
; libcalls with explicit clamping to INT32_MIN/INT32_MAX.
define i32 @fcvt_w_s_sat(float %a) nounwind {
; CHECKIF-LABEL: fcvt_w_s_sat:
; CHECKIF:       # %bb.0: # %start
; CHECKIF-NEXT:    feq.s a0, fa0, fa0
; CHECKIF-NEXT:    beqz a0, .LBB1_2
; CHECKIF-NEXT:  # %bb.1:
; CHECKIF-NEXT:    fcvt.w.s a0, fa0, rtz
; CHECKIF-NEXT:  .LBB1_2: # %start
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_sat:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    lui a1, 847872
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    li s1, 0
; RV32I-NEXT:    lui s4, 524288
; RV32I-NEXT:    lui s3, 524288
; RV32I-NEXT:    bltz s2, .LBB1_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:  .LBB1_2: # %start
; RV32I-NEXT:    lui a0, 323584
; RV32I-NEXT:    addi a1, a0, -1
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    bge s1, a0, .LBB1_4
; RV32I-NEXT:  # %bb.3:
; RV32I-NEXT:    addi s3, s4, -1
; RV32I-NEXT:  .LBB1_4: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    bne a0, s1, .LBB1_6
; RV32I-NEXT:  # %bb.5: # %start
; RV32I-NEXT:    mv s1, s3
; RV32I-NEXT:  .LBB1_6: # %start
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_sat:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    lui a1, 847872
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    li s1, 0
; RV64I-NEXT:    lui s4, 524288
; RV64I-NEXT:    lui s3, 524288
; RV64I-NEXT:    bltz s2, .LBB1_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s3, a0
; RV64I-NEXT:  .LBB1_2: # %start
; RV64I-NEXT:    lui a0, 323584
; RV64I-NEXT:    addiw a1, a0, -1
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    bge s1, a0, .LBB1_4
; RV64I-NEXT:  # %bb.3:
; RV64I-NEXT:    addiw s3, s4, -1
; RV64I-NEXT:  .LBB1_4: # %start
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    bne a0, s1, .LBB1_6
; RV64I-NEXT:  # %bb.5: # %start
; RV64I-NEXT:    mv s1, s3
; RV64I-NEXT:  .LBB1_6: # %start
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s4, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
start:
  %0 = tail call i32 @llvm.fptosi.sat.i32.f32(float %a)
  ret i32 %0
}
declare i32 @llvm.fptosi.sat.i32.f32(float)

; Plain fptoui float -> i32: fcvt.wu.s (rtz) with the F extension, a
; __fixunssfsi libcall in the soft-float configurations.
define i32 @fcvt_wu_s(float %a) nounwind {
; CHECKIF-LABEL: fcvt_wu_s:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i32
  ret i32 %1
}

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind {
; CHECKIF-LABEL: fcvt_wu_s_multiple_use:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.wu.s a1, fa0, rtz
; CHECKIF-NEXT:    li a0, 1
; CHECKIF-NEXT:    beqz a1, .LBB3_2
; CHECKIF-NEXT:  # %bb.1:
; CHECKIF-NEXT:    mv a0, a1
; CHECKIF-NEXT:  .LBB3_2:
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_multiple_use:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, 1
; RV32I-NEXT:    beqz a1, .LBB3_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:  .LBB3_2:
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_multiple_use:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi@plt
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    beqz a1, .LBB3_2
; RV64I-NEXT:  # %bb.1:
; RV64I-NEXT:    mv a0, a1
; RV64I-NEXT:  .LBB3_2:
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = fptoui float %x to i32
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}

; Saturating fptoui via @llvm.fptoui.sat.i32.f32: with F this is a feq.s NaN
; check guarding fcvt.wu.s; soft-float expands to compiler-rt compare/convert
; libcalls with explicit clamping to [0, UINT32_MAX].
define i32 @fcvt_wu_s_sat(float %a) nounwind {
; CHECKIF-LABEL: fcvt_wu_s_sat:
; CHECKIF:       # %bb.0: # %start
; CHECKIF-NEXT:    feq.s a0, fa0, fa0
; CHECKIF-NEXT:    beqz a0, .LBB4_2
; CHECKIF-NEXT:  # %bb.1:
; CHECKIF-NEXT:    fcvt.wu.s a0, fa0, rtz
; CHECKIF-NEXT:  .LBB4_2: # %start
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_sat:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    li s2, 0
; RV32I-NEXT:    bltz s1, .LBB4_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:  .LBB4_2: # %start
; RV32I-NEXT:    lui a0, 325632
; RV32I-NEXT:    addi a1, a0, -1
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    li a0, -1
; RV32I-NEXT:    bgtz a1, .LBB4_4
; RV32I-NEXT:  # %bb.3: # %start
; RV32I-NEXT:    mv a0, s2
; RV32I-NEXT:  .LBB4_4: # %start
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_sat:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    li s1, 0
; RV64I-NEXT:    bltz s2, .LBB4_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:  .LBB4_2: # %start
; RV64I-NEXT:    lui a0, 325632
; RV64I-NEXT:    addiw a1, a0, -1
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    blez a0, .LBB4_4
; RV64I-NEXT:  # %bb.3:
; RV64I-NEXT:    li a0, -1
; RV64I-NEXT:    srli s1, a0, 32
; RV64I-NEXT:  .LBB4_4: # %start
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
start:
  %0 = tail call i32 @llvm.fptoui.sat.i32.f32(float %a)
  ret i32 %0
}
declare i32 @llvm.fptoui.sat.i32.f32(float)

; Bitcast of an fadd result to i32: with F the FP result is moved to a GPR
; with fmv.x.w; soft-float just returns the __addsf3 result directly.
define i32 @fmv_x_w(float %a, float %b) nounwind {
; CHECKIF-LABEL: fmv_x_w:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fadd.s ft0, fa0, fa1
; CHECKIF-NEXT:    fmv.x.w a0, ft0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fmv_x_w:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __addsf3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmv_x_w:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __addsf3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
; Ensure fmv.x.w is generated even for a soft float calling convention
  %1 = fadd float %a, %b
  %2 = bitcast float %1 to i32
  ret i32 %2
}

; sitofp i32 -> float: fcvt.s.w with F; soft-float calls __floatsisf, and
; RV64I sign-extends the argument first with sext.w.
define float @fcvt_s_w(i32 %a) nounwind {
; CHECKIF-LABEL: fcvt_s_w:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i32 %a to float
  ret float %1
}

; sitofp of a loaded i32: the lw result feeds the conversion directly, so no
; extra sext.w is emitted on RV64.
define float @fcvt_s_w_load(i32* %p) nounwind {
; CHECKIF-LABEL: fcvt_s_w_load:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    lw a0, 0(a0)
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, i32* %p
  %1 = sitofp i32 %a to float
  ret float %1
}

; uitofp i32 -> float: fcvt.s.wu with F; soft-float calls __floatunsisf, and
; RV64I sign-extends the argument first with sext.w.
define float @fcvt_s_wu(i32 %a) nounwind {
; CHECKIF-LABEL: fcvt_s_wu:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = uitofp i32 %a to float
  ret float %1
}

; uitofp of a loaded i32: RV32IF and RV64IF differ (lw vs lwu for the
; zero-extending load), hence the split check prefixes.
define float @fcvt_s_wu_load(i32* %p) nounwind {
; RV32IF-LABEL: fcvt_s_wu_load:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    lw a0, 0(a0)
; RV32IF-NEXT:    fcvt.s.wu fa0, a0
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_load:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    lwu a0, 0(a0)
; RV64IF-NEXT:    fcvt.s.wu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_load:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lw a0, 0(a0)
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_load:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lw a0, 0(a0)
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %a = load i32, i32* %p
  %1 = uitofp i32 %a to float
  ret float %1
}

; Bitcast i32 arguments to float and add them: with F the GPR values move to
; FP registers via fmv.w.x; soft-float just calls __addsf3 on the raw bits.
define float @fmv_w_x(i32 %a, i32 %b) nounwind {
; CHECKIF-LABEL: fmv_w_x:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fmv.w.x ft0, a0
; CHECKIF-NEXT:    fmv.w.x ft1, a1
; CHECKIF-NEXT:    fadd.s fa0, ft0, ft1
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fmv_w_x:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __addsf3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fmv_w_x:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __addsf3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
; Ensure fmv.w.x is generated even for a soft float calling convention
  %1 = bitcast i32 %a to float
  %2 = bitcast i32 %b to float
  %3 = fadd float %1, %2
  ret float %3
}

; fptosi float -> i64: only RV64IF has a native fcvt.l.s; all other
; configurations fall back to the __fixsfdi libcall.
define i64 @fcvt_l_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_l_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_l_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_l_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfdi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_l_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i64
  ret i64 %1
}

; Saturating fptosi via @llvm.fptosi.sat.i64.f32: RV64IF uses the feq.s-guarded
; fcvt.l.s; RV32IF and the soft-float targets expand to libcalls plus explicit
; clamping of the 64-bit result to INT64_MIN/INT64_MAX.
define i64 @fcvt_l_s_sat(float %a) nounwind {
; RV32IF-LABEL: fcvt_l_s_sat:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    lui a0, %hi(.LCPI12_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_0)(a0)
; RV32IF-NEXT:    fmv.s fs0, fa0
; RV32IF-NEXT:    fle.s s0, ft0, fa0
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    mv a2, a0
; RV32IF-NEXT:    bnez s0, .LBB12_2
; RV32IF-NEXT:  # %bb.1: # %start
; RV32IF-NEXT:    li a2, 0
; RV32IF-NEXT:  .LBB12_2: # %start
; RV32IF-NEXT:    lui a0, %hi(.LCPI12_1)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI12_1)(a0)
; RV32IF-NEXT:    flt.s a3, ft0, fs0
; RV32IF-NEXT:    li a0, -1
; RV32IF-NEXT:    beqz a3, .LBB12_9
; RV32IF-NEXT:  # %bb.3: # %start
; RV32IF-NEXT:    feq.s a2, fs0, fs0
; RV32IF-NEXT:    beqz a2, .LBB12_10
; RV32IF-NEXT:  .LBB12_4: # %start
; RV32IF-NEXT:    lui a4, 524288
; RV32IF-NEXT:    beqz s0, .LBB12_11
; RV32IF-NEXT:  .LBB12_5: # %start
; RV32IF-NEXT:    bnez a3, .LBB12_12
; RV32IF-NEXT:  .LBB12_6: # %start
; RV32IF-NEXT:    bnez a2, .LBB12_8
; RV32IF-NEXT:  .LBB12_7: # %start
; RV32IF-NEXT:    li a1, 0
; RV32IF-NEXT:  .LBB12_8: # %start
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
; RV32IF-NEXT:  .LBB12_9: # %start
; RV32IF-NEXT:    mv a0, a2
; RV32IF-NEXT:    feq.s a2, fs0, fs0
; RV32IF-NEXT:    bnez a2, .LBB12_4
; RV32IF-NEXT:  .LBB12_10: # %start
; RV32IF-NEXT:    li a0, 0
; RV32IF-NEXT:    lui a4, 524288
; RV32IF-NEXT:    bnez s0, .LBB12_5
; RV32IF-NEXT:  .LBB12_11: # %start
; RV32IF-NEXT:    lui a1, 524288
; RV32IF-NEXT:    beqz a3, .LBB12_6
; RV32IF-NEXT:  .LBB12_12:
; RV32IF-NEXT:    addi a1, a4, -1
; RV32IF-NEXT:    beqz a2, .LBB12_7
; RV32IF-NEXT:    j .LBB12_8
;
; RV64IF-LABEL: fcvt_l_s_sat:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    feq.s a0, fa0, fa0
; RV64IF-NEXT:    beqz a0, .LBB12_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:  .LBB12_2: # %start
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_l_s_sat:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s6, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    lui a1, 913408
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixsfdi@plt
; RV32I-NEXT:    mv s2, a1
; RV32I-NEXT:    li s1, 0
; RV32I-NEXT:    li s5, 0
; RV32I-NEXT:    bltz s3, .LBB12_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s5, a0
; RV32I-NEXT:  .LBB12_2: # %start
; RV32I-NEXT:    lui a0, 389120
; RV32I-NEXT:    addi s4, a0, -1
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s4
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    li s6, -1
; RV32I-NEXT:    blt s1, a0, .LBB12_4
; RV32I-NEXT:  # %bb.3: # %start
; RV32I-NEXT:    mv s6, s5
; RV32I-NEXT:  .LBB12_4: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    mv s3, s1
; RV32I-NEXT:    bne a0, s1, .LBB12_6
; RV32I-NEXT:  # %bb.5: # %start
; RV32I-NEXT:    mv s3, s6
; RV32I-NEXT:  .LBB12_6: # %start
; RV32I-NEXT:    lui a1, 913408
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    lui s6, 524288
; RV32I-NEXT:    lui s5, 524288
; RV32I-NEXT:    blt a0, s1, .LBB12_8
; RV32I-NEXT:  # %bb.7: # %start
; RV32I-NEXT:    mv s5, s2
; RV32I-NEXT:  .LBB12_8: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s4
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    bge s1, a0, .LBB12_10
; RV32I-NEXT:  # %bb.9:
; RV32I-NEXT:    addi s5, s6, -1
; RV32I-NEXT:  .LBB12_10: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    bne a0, s1, .LBB12_12
; RV32I-NEXT:  # %bb.11: # %start
; RV32I-NEXT:    mv s1, s5
; RV32I-NEXT:  .LBB12_12: # %start
; RV32I-NEXT:    mv a0, s3
; RV32I-NEXT:    mv a1, s1
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s6, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_l_s_sat:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s4, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    lui a1, 913408
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s3, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    li s1, 0
; RV64I-NEXT:    li s4, -1
; RV64I-NEXT:    bltz s3, .LBB12_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:    j .LBB12_3
; RV64I-NEXT:  .LBB12_2:
; RV64I-NEXT:    slli s2, s4, 63
; RV64I-NEXT:  .LBB12_3: # %start
; RV64I-NEXT:    lui a0, 389120
; RV64I-NEXT:    addiw a1, a0, -1
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    bge s1, a0, .LBB12_5
; RV64I-NEXT:  # %bb.4:
; RV64I-NEXT:    srli s2, s4, 1
; RV64I-NEXT:  .LBB12_5: # %start
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    bne a0, s1, .LBB12_7
; RV64I-NEXT:  # %bb.6: # %start
; RV64I-NEXT:    mv s1, s2
; RV64I-NEXT:  .LBB12_7: # %start
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s4, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
start:
  %0 = tail call i64 @llvm.fptosi.sat.i64.f32(float %a)
  ret i64 %0
}
declare i64 @llvm.fptosi.sat.i64.f32(float)

; fptoui float -> i64: only RV64IF has a native fcvt.lu.s; all other
; configurations fall back to the __fixunssfdi libcall.
define i64 @fcvt_lu_s(float %a) nounwind {
; RV32IF-LABEL: fcvt_lu_s:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_lu_s:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_lu_s:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfdi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_lu_s:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i64
  ret i64 %1
}

; Saturating fptoui via @llvm.fptoui.sat.i64.f32: RV64IF uses the feq.s-guarded
; fcvt.lu.s; RV32IF and the soft-float targets expand to libcalls plus explicit
; clamping of the 64-bit result to [0, UINT64_MAX].
define i64 @fcvt_lu_s_sat(float %a) nounwind {
; RV32IF-LABEL: fcvt_lu_s_sat:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fsw fs0, 4(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    fmv.s fs0, fa0
; RV32IF-NEXT:    fmv.w.x ft0, zero
; RV32IF-NEXT:    fle.s s0, ft0, fa0
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    mv a3, a0
; RV32IF-NEXT:    bnez s0, .LBB14_2
; RV32IF-NEXT:  # %bb.1: # %start
; RV32IF-NEXT:    li a3, 0
; RV32IF-NEXT:  .LBB14_2: # %start
; RV32IF-NEXT:    lui a0, %hi(.LCPI14_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI14_0)(a0)
; RV32IF-NEXT:    flt.s a4, ft0, fs0
; RV32IF-NEXT:    li a2, -1
; RV32IF-NEXT:    li a0, -1
; RV32IF-NEXT:    beqz a4, .LBB14_7
; RV32IF-NEXT:  # %bb.3: # %start
; RV32IF-NEXT:    beqz s0, .LBB14_8
; RV32IF-NEXT:  .LBB14_4: # %start
; RV32IF-NEXT:    bnez a4, .LBB14_6
; RV32IF-NEXT:  .LBB14_5: # %start
; RV32IF-NEXT:    mv a2, a1
; RV32IF-NEXT:  .LBB14_6: # %start
; RV32IF-NEXT:    mv a1, a2
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    flw fs0, 4(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
; RV32IF-NEXT:  .LBB14_7: # %start
; RV32IF-NEXT:    mv a0, a3
; RV32IF-NEXT:    bnez s0, .LBB14_4
; RV32IF-NEXT:  .LBB14_8: # %start
; RV32IF-NEXT:    li a1, 0
; RV32IF-NEXT:    beqz a4, .LBB14_5
; RV32IF-NEXT:    j .LBB14_6
;
; RV64IF-LABEL: fcvt_lu_s_sat:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    feq.s a0, fa0, fa0
; RV64IF-NEXT:    beqz a0, .LBB14_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:  .LBB14_2: # %start
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_lu_s_sat:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixunssfdi@plt
; RV32I-NEXT:    mv s1, a1
; RV32I-NEXT:    li s5, 0
; RV32I-NEXT:    bltz s2, .LBB14_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s5, a0
; RV32I-NEXT:  .LBB14_2: # %start
; RV32I-NEXT:    lui a0, 391168
; RV32I-NEXT:    addi s4, a0, -1
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s4
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    li s2, -1
; RV32I-NEXT:    li s3, -1
; RV32I-NEXT:    bgtz a0, .LBB14_4
; RV32I-NEXT:  # %bb.3: # %start
; RV32I-NEXT:    mv s3, s5
; RV32I-NEXT:  .LBB14_4: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    li s5, 0
; RV32I-NEXT:    bltz a0, .LBB14_6
; RV32I-NEXT:  # %bb.5: # %start
; RV32I-NEXT:    mv s5, s1
; RV32I-NEXT:  .LBB14_6: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s4
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    bgtz a0, .LBB14_8
; RV32I-NEXT:  # %bb.7: # %start
; RV32I-NEXT:    mv s2, s5
; RV32I-NEXT:  .LBB14_8: # %start
; RV32I-NEXT:    mv a0, s3
; RV32I-NEXT:    mv a1, s2
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s5, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_lu_s_sat:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    li s2, 0
; RV64I-NEXT:    bltz s1, .LBB14_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:  .LBB14_2: # %start
; RV64I-NEXT:    lui a0, 391168
; RV64I-NEXT:    addiw a1, a0, -1
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    li a0, -1
; RV64I-NEXT:    bgtz a1, .LBB14_4
; RV64I-NEXT:  # %bb.3: # %start
; RV64I-NEXT:    mv a0, s2
; RV64I-NEXT:  .LBB14_4: # %start
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
start:
  %0 = tail call i64 @llvm.fptoui.sat.i64.f32(float %a)
  ret i64 %0
}
declare i64 @llvm.fptoui.sat.i64.f32(float)

; Signed i64 -> float conversion. Targets without a native 64-bit
; int-to-float instruction (both RV32 configs and RV64 soft-float) lower
; this to the __floatdisf libcall; RV64 with F selects fcvt.s.l directly.
define float @fcvt_s_l(i64 %a) nounwind {
; RV32IF-LABEL: fcvt_s_l:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatdisf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_l:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.l fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_l:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatdisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_l:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatdisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i64 %a to float
  ret float %1
}
949
; Unsigned i64 -> float conversion. Mirrors fcvt_s_l but uses the
; unsigned libcall __floatundisf; RV64 with F selects fcvt.s.lu.
define float @fcvt_s_lu(i64 %a) nounwind {
; RV32IF-LABEL: fcvt_s_lu:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    call __floatundisf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_lu:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.s.lu fa0, a0
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_lu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatundisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_lu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatundisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = uitofp i64 %a to float
  ret float %1
}
985
; Signed i8 -> float. The argument arrives sign-extended per the ABI, so
; hardware targets use plain fcvt.s.w with no extra extension; soft-float
; targets call __floatsisf.
define float @fcvt_s_w_i8(i8 signext %a) nounwind {
; CHECKIF-LABEL: fcvt_s_w_i8:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i8 %a to float
  ret float %1
}
1012
; Unsigned i8 -> float. The argument arrives zero-extended per the ABI,
; so hardware targets use fcvt.s.wu directly; soft-float targets call
; __floatunsisf.
define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind {
; CHECKIF-LABEL: fcvt_s_wu_i8:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = uitofp i8 %a to float
  ret float %1
}
1039
; Signed i16 -> float; same pattern as the i8 case (ABI-sign-extended
; argument, so no explicit extension is needed before the convert).
define float @fcvt_s_w_i16(i16 signext %a) nounwind {
; CHECKIF-LABEL: fcvt_s_w_i16:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.w fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = sitofp i16 %a to float
  ret float %1
}
1066
; Unsigned i16 -> float; same pattern as the unsigned i8 case
; (ABI-zero-extended argument converts directly).
define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind {
; CHECKIF-LABEL: fcvt_s_wu_i16:
; CHECKIF:       # %bb.0:
; CHECKIF-NEXT:    fcvt.s.wu fa0, a0
; CHECKIF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = uitofp i16 %a to float
  ret float %1
}
1093
; Make sure we select W version of addi on RV64.
; The add's result feeds both a sitofp and the (sign-extended i32)
; return value, so on RV64 the expected output uses addiw, which
; sign-extends its 32-bit result.
define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, float* %1) nounwind {
; RV32IF-LABEL: fcvt_s_w_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.w ft0, a0
; RV32IF-NEXT:    fsw ft0, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_w_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.w ft0, a0
; RV64IF-NEXT:    fsw ft0, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_w_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatsisf@plt
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_w_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatsisf@plt
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = sitofp i32 %3 to float
  store float %4, float* %1, align 4
  ret i32 %3
}
1150
; Make sure we select W version of addi on RV64.
; Unsigned variant of fcvt_s_w_demanded_bits: the incremented value is
; converted via uitofp and also returned, so RV64 should still pick addiw.
define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, float* %1) nounwind {
; RV32IF-LABEL: fcvt_s_wu_demanded_bits:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi a0, a0, 1
; RV32IF-NEXT:    fcvt.s.wu ft0, a0
; RV32IF-NEXT:    fsw ft0, 0(a1)
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_s_wu_demanded_bits:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addiw a0, a0, 1
; RV64IF-NEXT:    fcvt.s.wu ft0, a0
; RV64IF-NEXT:    fsw ft0, 0(a1)
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_s_wu_demanded_bits:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a1
; RV32I-NEXT:    addi s1, a0, 1
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    call __floatunsisf@plt
; RV32I-NEXT:    sw a0, 0(s0)
; RV32I-NEXT:    mv a0, s1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_s_wu_demanded_bits:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a1
; RV64I-NEXT:    addiw s1, a0, 1
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    call __floatunsisf@plt
; RV64I-NEXT:    sw a0, 0(s0)
; RV64I-NEXT:    mv a0, s1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %3 = add i32 %0, 1
  %4 = uitofp i32 %3 to float
  store float %4, float* %1, align 4
  ret i32 %3
}
1207
; float -> signed i16 (non-saturating). Overflow is undefined for
; fptosi, so the narrow result needs no clamping; RV64 with F uses the
; 64-bit convert fcvt.l.s and soft-float RV64 uses __fixsfdi.
define signext i16 @fcvt_w_s_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i16
  ret i16 %1
}
1239
; Saturating float -> signed i16 via llvm.fptosi.sat. Hardware targets
; clamp with fmax/fmin against constant-pool bounds and return 0 for NaN
; (the feq.s/beqz guard). Soft-float targets open-code the clamp with
; __gesf2/__gtsf2 comparisons against the i16 range and use __unordsf2
; for the NaN -> 0 case, then sign-extend the 16-bit result.
define signext i16 @fcvt_w_s_sat_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_sat_i16:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    feq.s a0, fa0, fa0
; RV32IF-NEXT:    beqz a0, .LBB24_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    lui a0, %hi(.LCPI24_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI24_0)(a0)
; RV32IF-NEXT:    lui a0, %hi(.LCPI24_1)
; RV32IF-NEXT:    flw ft1, %lo(.LCPI24_1)(a0)
; RV32IF-NEXT:    fmax.s ft0, fa0, ft0
; RV32IF-NEXT:    fmin.s ft0, ft0, ft1
; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT:  .LBB24_2: # %start
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_sat_i16:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    feq.s a0, fa0, fa0
; RV64IF-NEXT:    beqz a0, .LBB24_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    lui a0, %hi(.LCPI24_0)
; RV64IF-NEXT:    flw ft0, %lo(.LCPI24_0)(a0)
; RV64IF-NEXT:    lui a0, %hi(.LCPI24_1)
; RV64IF-NEXT:    flw ft1, %lo(.LCPI24_1)(a0)
; RV64IF-NEXT:    fmax.s ft0, fa0, ft0
; RV64IF-NEXT:    fmin.s ft0, ft0, ft1
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:  .LBB24_2: # %start
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_sat_i16:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    lui a1, 815104
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    li s2, 0
; RV32I-NEXT:    lui s3, 1048568
; RV32I-NEXT:    bltz s1, .LBB24_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:  .LBB24_2: # %start
; RV32I-NEXT:    lui a0, 290816
; RV32I-NEXT:    addi a1, a0, -512
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    bge s2, a0, .LBB24_4
; RV32I-NEXT:  # %bb.3:
; RV32I-NEXT:    lui a0, 8
; RV32I-NEXT:    addi s3, a0, -1
; RV32I-NEXT:  .LBB24_4: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    bne a0, s2, .LBB24_6
; RV32I-NEXT:  # %bb.5: # %start
; RV32I-NEXT:    mv s2, s3
; RV32I-NEXT:  .LBB24_6: # %start
; RV32I-NEXT:    slli a0, s2, 16
; RV32I-NEXT:    srai a0, a0, 16
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_sat_i16:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    lui a1, 815104
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    li s2, 0
; RV64I-NEXT:    lui s3, 1048568
; RV64I-NEXT:    bltz s1, .LBB24_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s3, a0
; RV64I-NEXT:  .LBB24_2: # %start
; RV64I-NEXT:    lui a0, 290816
; RV64I-NEXT:    addiw a1, a0, -512
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    bge s2, a0, .LBB24_4
; RV64I-NEXT:  # %bb.3:
; RV64I-NEXT:    lui a0, 8
; RV64I-NEXT:    addiw s3, a0, -1
; RV64I-NEXT:  .LBB24_4: # %start
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    bne a0, s2, .LBB24_6
; RV64I-NEXT:  # %bb.5: # %start
; RV64I-NEXT:    mv s2, s3
; RV64I-NEXT:  .LBB24_6: # %start
; RV64I-NEXT:    slli a0, s2, 48
; RV64I-NEXT:    srai a0, a0, 48
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
start:
  %0 = tail call i16 @llvm.fptosi.sat.i16.f32(float %a)
  ret i16 %0
}
declare i16 @llvm.fptosi.sat.i16.f32(float)
1367
; float -> unsigned i16 (non-saturating). Overflow is undefined for
; fptoui, so no clamping is required; soft-float targets use the
; unsigned fix libcalls.
define zeroext i16 @fcvt_wu_s_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_i16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_i16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_i16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_i16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i16
  ret i16 %1
}
1399
; Saturating float -> unsigned i16 via llvm.fptoui.sat. Hardware targets
; clamp to [0.0, 65535.0] with fmax (against +0.0) and fmin (against a
; constant-pool value); no NaN guard is needed because fmax with zero
; already maps NaN to 0. Soft-float targets branch on __gesf2/__gtsf2
; results and mask the final value to 16 bits.
define zeroext i16 @fcvt_wu_s_sat_i16(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_sat_i16:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    lui a0, %hi(.LCPI26_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a0)
; RV32IF-NEXT:    fmv.w.x ft1, zero
; RV32IF-NEXT:    fmax.s ft1, fa0, ft1
; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_sat_i16:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    lui a0, %hi(.LCPI26_0)
; RV64IF-NEXT:    flw ft0, %lo(.LCPI26_0)(a0)
; RV64IF-NEXT:    fmv.w.x ft1, zero
; RV64IF-NEXT:    fmax.s ft1, fa0, ft1
; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_sat_i16:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    li s2, 0
; RV32I-NEXT:    bltz s1, .LBB26_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:  .LBB26_2: # %start
; RV32I-NEXT:    lui a0, 292864
; RV32I-NEXT:    addi a1, a0, -256
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    lui a1, 16
; RV32I-NEXT:    addi a1, a1, -1
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    bgtz a0, .LBB26_4
; RV32I-NEXT:  # %bb.3: # %start
; RV32I-NEXT:    mv a2, s2
; RV32I-NEXT:  .LBB26_4: # %start
; RV32I-NEXT:    and a0, a2, a1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_sat_i16:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    li s2, 0
; RV64I-NEXT:    bltz s1, .LBB26_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:  .LBB26_2: # %start
; RV64I-NEXT:    lui a0, 292864
; RV64I-NEXT:    addiw a1, a0, -256
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    lui a1, 16
; RV64I-NEXT:    addiw a1, a1, -1
; RV64I-NEXT:    mv a2, a1
; RV64I-NEXT:    bgtz a0, .LBB26_4
; RV64I-NEXT:  # %bb.3: # %start
; RV64I-NEXT:    mv a2, s2
; RV64I-NEXT:  .LBB26_4: # %start
; RV64I-NEXT:    and a0, a2, a1
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
start:
  %0 = tail call i16 @llvm.fptoui.sat.i16.f32(float %a)
  ret i16 %0
}
declare i16 @llvm.fptoui.sat.i16.f32(float)
1499
; float -> signed i8 (non-saturating); same lowering shape as the i16
; variant (fptosi overflow is undefined, so no clamp).
define signext i8 @fcvt_w_s_i8(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptosi float %a to i8
  ret i8 %1
}
1531
; Saturating float -> signed i8 via llvm.fptosi.sat. Same structure as
; the i16 saturating test but clamped to [-128, 127]: hardware targets
; use fmax/fmin with constant-pool bounds plus a NaN -> 0 guard;
; soft-float targets branch on the libcall comparison results and
; sign-extend the final 8-bit value.
define signext i8 @fcvt_w_s_sat_i8(float %a) nounwind {
; RV32IF-LABEL: fcvt_w_s_sat_i8:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    feq.s a0, fa0, fa0
; RV32IF-NEXT:    beqz a0, .LBB28_2
; RV32IF-NEXT:  # %bb.1:
; RV32IF-NEXT:    lui a0, %hi(.LCPI28_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI28_0)(a0)
; RV32IF-NEXT:    lui a0, %hi(.LCPI28_1)
; RV32IF-NEXT:    flw ft1, %lo(.LCPI28_1)(a0)
; RV32IF-NEXT:    fmax.s ft0, fa0, ft0
; RV32IF-NEXT:    fmin.s ft0, ft0, ft1
; RV32IF-NEXT:    fcvt.w.s a0, ft0, rtz
; RV32IF-NEXT:  .LBB28_2: # %start
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_w_s_sat_i8:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    feq.s a0, fa0, fa0
; RV64IF-NEXT:    beqz a0, .LBB28_2
; RV64IF-NEXT:  # %bb.1:
; RV64IF-NEXT:    lui a0, %hi(.LCPI28_0)
; RV64IF-NEXT:    flw ft0, %lo(.LCPI28_0)(a0)
; RV64IF-NEXT:    lui a0, %hi(.LCPI28_1)
; RV64IF-NEXT:    flw ft1, %lo(.LCPI28_1)(a0)
; RV64IF-NEXT:    fmax.s ft0, fa0, ft0
; RV64IF-NEXT:    fmin.s ft0, ft0, ft1
; RV64IF-NEXT:    fcvt.l.s a0, ft0, rtz
; RV64IF-NEXT:  .LBB28_2: # %start
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_w_s_sat_i8:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    lui a1, 798720
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixsfsi@plt
; RV32I-NEXT:    li s2, 0
; RV32I-NEXT:    li s3, -128
; RV32I-NEXT:    bltz s1, .LBB28_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s3, a0
; RV32I-NEXT:  .LBB28_2: # %start
; RV32I-NEXT:    lui a1, 274400
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    li s1, 127
; RV32I-NEXT:    blt s2, a0, .LBB28_4
; RV32I-NEXT:  # %bb.3: # %start
; RV32I-NEXT:    mv s1, s3
; RV32I-NEXT:  .LBB28_4: # %start
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s0
; RV32I-NEXT:    call __unordsf2@plt
; RV32I-NEXT:    bne a0, s2, .LBB28_6
; RV32I-NEXT:  # %bb.5: # %start
; RV32I-NEXT:    mv s2, s1
; RV32I-NEXT:  .LBB28_6: # %start
; RV32I-NEXT:    slli a0, s2, 24
; RV32I-NEXT:    srai a0, a0, 24
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_w_s_sat_i8:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -48
; RV64I-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    lui a1, 798720
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixsfdi@plt
; RV64I-NEXT:    li s2, 0
; RV64I-NEXT:    li s3, -128
; RV64I-NEXT:    bltz s1, .LBB28_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s3, a0
; RV64I-NEXT:  .LBB28_2: # %start
; RV64I-NEXT:    lui a1, 274400
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    li s1, 127
; RV64I-NEXT:    blt s2, a0, .LBB28_4
; RV64I-NEXT:  # %bb.3: # %start
; RV64I-NEXT:    mv s1, s3
; RV64I-NEXT:  .LBB28_4: # %start
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    mv a1, s0
; RV64I-NEXT:    call __unordsf2@plt
; RV64I-NEXT:    bne a0, s2, .LBB28_6
; RV64I-NEXT:  # %bb.5: # %start
; RV64I-NEXT:    mv s2, s1
; RV64I-NEXT:  .LBB28_6: # %start
; RV64I-NEXT:    slli a0, s2, 56
; RV64I-NEXT:    srai a0, a0, 56
; RV64I-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 48
; RV64I-NEXT:    ret
start:
  %0 = tail call i8 @llvm.fptosi.sat.i8.f32(float %a)
  ret i8 %0
}
declare i8 @llvm.fptosi.sat.i8.f32(float)
1657
; float -> unsigned i8 (non-saturating); same lowering shape as the
; unsigned i16 variant (fptoui overflow is undefined, so no clamp).
define zeroext i8 @fcvt_wu_s_i8(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_i8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_i8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_i8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_i8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fptoui float %a to i8
  ret i8 %1
}
1689
; Saturating float -> unsigned i8 via llvm.fptoui.sat. Hardware targets
; clamp to [0.0, 255.0] with fmax (against +0.0, which also maps NaN to
; 0) and fmin against a constant-pool bound. Soft-float targets branch
; on __gesf2/__gtsf2 results and mask the final value to 8 bits.
define zeroext i8 @fcvt_wu_s_sat_i8(float %a) nounwind {
; RV32IF-LABEL: fcvt_wu_s_sat_i8:
; RV32IF:       # %bb.0: # %start
; RV32IF-NEXT:    lui a0, %hi(.LCPI30_0)
; RV32IF-NEXT:    flw ft0, %lo(.LCPI30_0)(a0)
; RV32IF-NEXT:    fmv.w.x ft1, zero
; RV32IF-NEXT:    fmax.s ft1, fa0, ft1
; RV32IF-NEXT:    fmin.s ft0, ft1, ft0
; RV32IF-NEXT:    fcvt.wu.s a0, ft0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: fcvt_wu_s_sat_i8:
; RV64IF:       # %bb.0: # %start
; RV64IF-NEXT:    lui a0, %hi(.LCPI30_0)
; RV64IF-NEXT:    flw ft0, %lo(.LCPI30_0)(a0)
; RV64IF-NEXT:    fmv.w.x ft1, zero
; RV64IF-NEXT:    fmax.s ft1, fa0, ft1
; RV64IF-NEXT:    fmin.s ft0, ft1, ft0
; RV64IF-NEXT:    fcvt.lu.s a0, ft0, rtz
; RV64IF-NEXT:    ret
;
; RV32I-LABEL: fcvt_wu_s_sat_i8:
; RV32I:       # %bb.0: # %start
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __gesf2@plt
; RV32I-NEXT:    mv s1, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __fixunssfsi@plt
; RV32I-NEXT:    li s2, 0
; RV32I-NEXT:    bltz s1, .LBB30_2
; RV32I-NEXT:  # %bb.1: # %start
; RV32I-NEXT:    mv s2, a0
; RV32I-NEXT:  .LBB30_2: # %start
; RV32I-NEXT:    lui a1, 276464
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    call __gtsf2@plt
; RV32I-NEXT:    li a1, 255
; RV32I-NEXT:    bgtz a0, .LBB30_4
; RV32I-NEXT:  # %bb.3: # %start
; RV32I-NEXT:    mv a1, s2
; RV32I-NEXT:  .LBB30_4: # %start
; RV32I-NEXT:    andi a0, a1, 255
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcvt_wu_s_sat_i8:
; RV64I:       # %bb.0: # %start
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    li a1, 0
; RV64I-NEXT:    call __gesf2@plt
; RV64I-NEXT:    mv s1, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __fixunssfdi@plt
; RV64I-NEXT:    li s2, 0
; RV64I-NEXT:    bltz s1, .LBB30_2
; RV64I-NEXT:  # %bb.1: # %start
; RV64I-NEXT:    mv s2, a0
; RV64I-NEXT:  .LBB30_2: # %start
; RV64I-NEXT:    lui a1, 276464
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    call __gtsf2@plt
; RV64I-NEXT:    li a1, 255
; RV64I-NEXT:    bgtz a0, .LBB30_4
; RV64I-NEXT:  # %bb.3: # %start
; RV64I-NEXT:    mv a1, s2
; RV64I-NEXT:  .LBB30_4: # %start
; RV64I-NEXT:    andi a0, a1, 255
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
start:
  %0 = tail call i8 @llvm.fptoui.sat.i8.f32(float %a)
  ret i8 %0
}
declare i8 @llvm.fptoui.sat.i8.f32(float)
1783