; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s

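; Check that a rounding intrinsic (floor, ceil, trunc, round, roundeven)
; followed by fptosi/fptoui is selected as a single fcvt instruction with a
; static rounding mode. i64 conversions on RV32 have no single-instruction
; lowering, so they are expanded to a libcall of the rounding function
; followed by __fixsfdi/__fixunssfdi.
;
; floor uses the rdn (round-down) rounding mode.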
define signext i8 @test_floor_si8(float %x) {
; RV32IF-LABEL: test_floor_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_floor_si16(float %x) {
; RV32IF-LABEL: test_floor_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_floor_si32(float %x) {
; RV32IF-LABEL: test_floor_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_floor_si64(float %x) {
; RV32IF-LABEL: test_floor_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call floorf@plt
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_floor_ui8(float %x) {
; RV32IF-LABEL: test_floor_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_floor_ui16(float %x) {
; RV32IF-LABEL: test_floor_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_floor_ui32(float %x) {
; RV32IF-LABEL: test_floor_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_floor_ui64(float %x) {
; RV32IF-LABEL: test_floor_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call floorf@plt
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

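; ceil uses the rup (round-up) rounding mode.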
define signext i8 @test_ceil_si8(float %x) {
; RV32IF-LABEL: test_ceil_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_ceil_si16(float %x) {
; RV32IF-LABEL: test_ceil_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_ceil_si32(float %x) {
; RV32IF-LABEL: test_ceil_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_ceil_si64(float %x) {
; RV32IF-LABEL: test_ceil_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call ceilf@plt
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_ceil_ui8(float %x) {
; RV32IF-LABEL: test_ceil_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_ceil_ui16(float %x) {
; RV32IF-LABEL: test_ceil_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_ceil_ui32(float %x) {
; RV32IF-LABEL: test_ceil_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_ceil_ui64(float %x) {
; RV32IF-LABEL: test_ceil_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call ceilf@plt
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

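; trunc uses the rtz (round-towards-zero) rounding mode.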
define signext i8 @test_trunc_si8(float %x) {
; RV32IF-LABEL: test_trunc_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_trunc_si16(float %x) {
; RV32IF-LABEL: test_trunc_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_trunc_si32(float %x) {
; RV32IF-LABEL: test_trunc_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_trunc_si64(float %x) {
; RV32IF-LABEL: test_trunc_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call truncf@plt
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_trunc_ui8(float %x) {
; RV32IF-LABEL: test_trunc_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_trunc_ui16(float %x) {
; RV32IF-LABEL: test_trunc_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_trunc_ui32(float %x) {
; RV32IF-LABEL: test_trunc_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_trunc_ui64(float %x) {
; RV32IF-LABEL: test_trunc_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call truncf@plt
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

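; round uses the rmm (round-to-nearest, ties-away-from-zero) rounding mode.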
define signext i8 @test_round_si8(float %x) {
; RV32IF-LABEL: test_round_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_round_si16(float %x) {
; RV32IF-LABEL: test_round_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_round_si32(float %x) {
; RV32IF-LABEL: test_round_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_round_si64(float %x) {
; RV32IF-LABEL: test_round_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call roundf@plt
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_round_ui8(float %x) {
; RV32IF-LABEL: test_round_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_round_ui16(float %x) {
; RV32IF-LABEL: test_round_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_round_ui32(float %x) {
; RV32IF-LABEL: test_round_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_round_ui64(float %x) {
; RV32IF-LABEL: test_round_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call roundf@plt
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

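; roundeven uses the rne (round-to-nearest, ties-to-even) rounding mode.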
define signext i8 @test_roundeven_si8(float %x) {
; RV32IF-LABEL: test_roundeven_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_roundeven_si16(float %x) {
; RV32IF-LABEL: test_roundeven_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_roundeven_si32(float %x) {
; RV32IF-LABEL: test_roundeven_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_roundeven_si64(float %x) {
; RV32IF-LABEL: test_roundeven_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call roundevenf@plt
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_roundeven_ui8(float %x) {
; RV32IF-LABEL: test_roundeven_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_roundeven_ui16(float %x) {
; RV32IF-LABEL: test_roundeven_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_roundeven_ui32(float %x) {
; RV32IF-LABEL: test_roundeven_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_roundeven_ui64(float %x) {
; RV32IF-LABEL: test_roundeven_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call roundevenf@plt
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

declare float @llvm.floor.f32(float)
declare float @llvm.ceil.f32(float)
declare float @llvm.trunc.f32(float)
declare float @llvm.round.f32(float)
declare float @llvm.roundeven.f32(float)