; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32f | FileCheck -check-prefix=RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64f | FileCheck -check-prefix=RV64IF %s

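; Each group below checks that a rounding intrinsic followed by fptosi/fptoui
; is selected as a single fcvt using the matching static rounding mode; the
; i64 cases on RV32 have no single-instruction conversion and go through
; libcalls instead.
;
; floor: expected to use the rdn (round towards -infinity) rounding mode.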
define signext i8 @test_floor_si8(float %x) {
; RV32IF-LABEL: test_floor_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_floor_si16(float %x) {
; RV32IF-LABEL: test_floor_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_floor_si32(float %x) {
; RV32IF-LABEL: test_floor_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_floor_si64(float %x) {
; RV32IF-LABEL: test_floor_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call floorf@plt
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_floor_ui8(float %x) {
; RV32IF-LABEL: test_floor_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_floor_ui16(float %x) {
; RV32IF-LABEL: test_floor_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_floor_ui32(float %x) {
; RV32IF-LABEL: test_floor_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_floor_ui64(float %x) {
; RV32IF-LABEL: test_floor_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call floorf@plt
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rdn
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

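; ceil: expected to use the rup (round towards +infinity) rounding mode.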
define signext i8 @test_ceil_si8(float %x) {
; RV32IF-LABEL: test_ceil_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_ceil_si16(float %x) {
; RV32IF-LABEL: test_ceil_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_ceil_si32(float %x) {
; RV32IF-LABEL: test_ceil_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_ceil_si64(float %x) {
; RV32IF-LABEL: test_ceil_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call ceilf@plt
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_ceil_ui8(float %x) {
; RV32IF-LABEL: test_ceil_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_ceil_ui16(float %x) {
; RV32IF-LABEL: test_ceil_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_ceil_ui32(float %x) {
; RV32IF-LABEL: test_ceil_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_ceil_ui64(float %x) {
; RV32IF-LABEL: test_ceil_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call ceilf@plt
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rup
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

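; trunc: expected to use the rtz (round towards zero) rounding mode.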
define signext i8 @test_trunc_si8(float %x) {
; RV32IF-LABEL: test_trunc_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_trunc_si16(float %x) {
; RV32IF-LABEL: test_trunc_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_trunc_si32(float %x) {
; RV32IF-LABEL: test_trunc_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_trunc_si64(float %x) {
; RV32IF-LABEL: test_trunc_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call truncf@plt
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_trunc_ui8(float %x) {
; RV32IF-LABEL: test_trunc_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_trunc_ui16(float %x) {
; RV32IF-LABEL: test_trunc_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_trunc_ui32(float %x) {
; RV32IF-LABEL: test_trunc_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_trunc_ui64(float %x) {
; RV32IF-LABEL: test_trunc_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call truncf@plt
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rtz
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

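; round: expected to use the rmm (round to nearest, ties away from zero)
; rounding mode.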
define signext i8 @test_round_si8(float %x) {
; RV32IF-LABEL: test_round_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_round_si16(float %x) {
; RV32IF-LABEL: test_round_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_round_si32(float %x) {
; RV32IF-LABEL: test_round_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_round_si64(float %x) {
; RV32IF-LABEL: test_round_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call roundf@plt
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_round_ui8(float %x) {
; RV32IF-LABEL: test_round_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_round_ui16(float %x) {
; RV32IF-LABEL: test_round_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_round_ui32(float %x) {
; RV32IF-LABEL: test_round_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_round_ui64(float %x) {
; RV32IF-LABEL: test_round_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call roundf@plt
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rmm
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

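; roundeven: expected to use the rne (round to nearest, ties to even) rounding
; mode.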
define signext i8 @test_roundeven_si8(float %x) {
; RV32IF-LABEL: test_roundeven_si8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i8
  ret i8 %b
}

define signext i16 @test_roundeven_si16(float %x) {
; RV32IF-LABEL: test_roundeven_si16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i16
  ret i16 %b
}

define signext i32 @test_roundeven_si32(float %x) {
; RV32IF-LABEL: test_roundeven_si32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.w.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i32
  ret i32 %b
}

define i64 @test_roundeven_si64(float %x) {
; RV32IF-LABEL: test_roundeven_si64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call roundevenf@plt
; RV32IF-NEXT:    call __fixsfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_si64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.l.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptosi float %a to i64
  ret i64 %b
}

define zeroext i8 @test_roundeven_ui8(float %x) {
; RV32IF-LABEL: test_roundeven_ui8:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui8:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i8
  ret i8 %b
}

define zeroext i16 @test_roundeven_ui16(float %x) {
; RV32IF-LABEL: test_roundeven_ui16:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui16:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i16
  ret i16 %b
}

define signext i32 @test_roundeven_ui32(float %x) {
; RV32IF-LABEL: test_roundeven_ui32:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui32:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.wu.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i32
  ret i32 %b
}

define i64 @test_roundeven_ui64(float %x) {
; RV32IF-LABEL: test_roundeven_ui64:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call roundevenf@plt
; RV32IF-NEXT:    call __fixunssfdi@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_ui64:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    fcvt.lu.s a0, fa0, rne
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  %b = fptoui float %a to i64
  ret i64 %b
}

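; When the rounded result is returned as a float, +f alone provides no
; round-to-integral instruction, so these cases are expected to remain calls
; to the corresponding libm functions (floorf, ceilf, truncf, roundf,
; roundevenf).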
define float @test_floor_float(float %x) {
; RV32IF-LABEL: test_floor_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call floorf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_floor_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call floorf@plt
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
  %a = call float @llvm.floor.f32(float %x)
  ret float %a
}

define float @test_ceil_float(float %x) {
; RV32IF-LABEL: test_ceil_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call ceilf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_ceil_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call ceilf@plt
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
  %a = call float @llvm.ceil.f32(float %x)
  ret float %a
}

define float @test_trunc_float(float %x) {
; RV32IF-LABEL: test_trunc_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call truncf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_trunc_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call truncf@plt
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
  %a = call float @llvm.trunc.f32(float %x)
  ret float %a
}

define float @test_round_float(float %x) {
; RV32IF-LABEL: test_round_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call roundf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_round_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call roundf@plt
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
  %a = call float @llvm.round.f32(float %x)
  ret float %a
}

define float @test_roundeven_float(float %x) {
; RV32IF-LABEL: test_roundeven_float:
; RV32IF:       # %bb.0:
; RV32IF-NEXT:    addi sp, sp, -16
; RV32IF-NEXT:    .cfi_def_cfa_offset 16
; RV32IF-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT:    .cfi_offset ra, -4
; RV32IF-NEXT:    call roundevenf@plt
; RV32IF-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT:    addi sp, sp, 16
; RV32IF-NEXT:    ret
;
; RV64IF-LABEL: test_roundeven_float:
; RV64IF:       # %bb.0:
; RV64IF-NEXT:    addi sp, sp, -16
; RV64IF-NEXT:    .cfi_def_cfa_offset 16
; RV64IF-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT:    .cfi_offset ra, -8
; RV64IF-NEXT:    call roundevenf@plt
; RV64IF-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT:    addi sp, sp, 16
; RV64IF-NEXT:    ret
  %a = call float @llvm.roundeven.f32(float %x)
  ret float %a
}

declare float @llvm.floor.f32(float)
declare float @llvm.ceil.f32(float)
declare float @llvm.trunc.f32(float)
declare float @llvm.round.f32(float)
declare float @llvm.roundeven.f32(float)