; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA32F
; RUN: llc --mtriple=loongarch32 --mattr=+d < %s | FileCheck %s --check-prefix=LA32D
; RUN: llc --mtriple=loongarch64 --mattr=+f,-d < %s | FileCheck %s --check-prefix=LA64F
; RUN: llc --mtriple=loongarch64 --mattr=+d < %s | FileCheck %s --check-prefix=LA64D

; fptosi float -> i8, result sign-extended to the full register (signext).
define signext i8 @convert_float_to_i8(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i8:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_float_to_i8:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_float_to_i8:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_float_to_i8:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = fptosi float %a to i8
  ret i8 %1
}

; fptosi float -> i16, result sign-extended to the full register (signext).
define signext i16 @convert_float_to_i16(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i16:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_float_to_i16:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_float_to_i16:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_float_to_i16:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = fptosi float %a to i16
  ret i16 %1
}

; fptosi float -> i32.
define i32 @convert_float_to_i32(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i32:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_float_to_i32:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_float_to_i32:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_float_to_i32:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.s $a0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = fptosi float %a to i32
  ret i32 %1
}

; fptosi float -> i64. LA32 has no native 64-bit conversion, so it calls the
; compiler-rt helper __fixsfdi.
define i64 @convert_float_to_i64(float %a) nounwind {
; LA32F-LABEL: convert_float_to_i64:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl __fixsfdi
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_float_to_i64:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl __fixsfdi
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_float_to_i64:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_float_to_i64:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = fptosi float %a to i64
  ret i64 %1
}

; fptoui float -> i8, result zero-extended to the full register (zeroext).
define zeroext i8 @convert_float_to_u8(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u8:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_float_to_u8:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_float_to_u8:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_float_to_u8:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = fptoui float %a to i8
  ret i8 %1
}

; fptoui float -> i16, result zero-extended to the full register (zeroext).
define zeroext i16 @convert_float_to_u16(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u16:
; LA32F:       # %bb.0:
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_float_to_u16:
; LA32D:       # %bb.0:
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_float_to_u16:
; LA64F:       # %bb.0:
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_float_to_u16:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = fptoui float %a to i16
  ret i16 %1
}

; fptoui float -> i32. Without a native unsigned conversion the lowering
; selects between ftintrz(a) and ftintrz(a - 2^31) ^ 0x80000000 based on
; a < 2^31 (the constant-pool load); LA64D can instead use the wider
; signed ftintrz.l.s directly.
define i32 @convert_float_to_u32(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u32:
; LA32F:       # %bb.0:
; LA32F-NEXT:    pcalau12i $a0, .LCPI6_0
; LA32F-NEXT:    addi.w $a0, $a0, .LCPI6_0
; LA32F-NEXT:    fld.s $fa1, $a0, 0
; LA32F-NEXT:    fsub.s $fa2, $fa0, $fa1
; LA32F-NEXT:    ftintrz.w.s $fa2, $fa2
; LA32F-NEXT:    movfr2gr.s $a0, $fa2
; LA32F-NEXT:    lu12i.w $a1, -524288
; LA32F-NEXT:    xor $a0, $a0, $a1
; LA32F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA32F-NEXT:    movcf2gr $a1, $fcc0
; LA32F-NEXT:    masknez $a0, $a0, $a1
; LA32F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32F-NEXT:    movfr2gr.s $a2, $fa0
; LA32F-NEXT:    maskeqz $a1, $a2, $a1
; LA32F-NEXT:    or $a0, $a1, $a0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_float_to_u32:
; LA32D:       # %bb.0:
; LA32D-NEXT:    pcalau12i $a0, .LCPI6_0
; LA32D-NEXT:    addi.w $a0, $a0, .LCPI6_0
; LA32D-NEXT:    fld.s $fa1, $a0, 0
; LA32D-NEXT:    fsub.s $fa2, $fa0, $fa1
; LA32D-NEXT:    ftintrz.w.s $fa2, $fa2
; LA32D-NEXT:    movfr2gr.s $a0, $fa2
; LA32D-NEXT:    lu12i.w $a1, -524288
; LA32D-NEXT:    xor $a0, $a0, $a1
; LA32D-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA32D-NEXT:    movcf2gr $a1, $fcc0
; LA32D-NEXT:    masknez $a0, $a0, $a1
; LA32D-NEXT:    ftintrz.w.s $fa0, $fa0
; LA32D-NEXT:    movfr2gr.s $a2, $fa0
; LA32D-NEXT:    maskeqz $a1, $a2, $a1
; LA32D-NEXT:    or $a0, $a1, $a0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_float_to_u32:
; LA64F:       # %bb.0:
; LA64F-NEXT:    pcalau12i $a0, .LCPI6_0
; LA64F-NEXT:    addi.d $a0, $a0, .LCPI6_0
; LA64F-NEXT:    fld.s $fa1, $a0, 0
; LA64F-NEXT:    fsub.s $fa2, $fa0, $fa1
; LA64F-NEXT:    ftintrz.w.s $fa2, $fa2
; LA64F-NEXT:    movfr2gr.s $a0, $fa2
; LA64F-NEXT:    lu12i.w $a1, -524288
; LA64F-NEXT:    xor $a0, $a0, $a1
; LA64F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA64F-NEXT:    movcf2gr $a1, $fcc0
; LA64F-NEXT:    masknez $a0, $a0, $a1
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a2, $fa0
; LA64F-NEXT:    maskeqz $a1, $a2, $a1
; LA64F-NEXT:    or $a0, $a1, $a0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_float_to_u32:
; LA64D:       # %bb.0:
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = fptoui float %a to i32
  ret i32 %1
}

; fptoui float -> i64. LA32 calls the compiler-rt helper __fixunssfdi; LA64
; uses the select-based lowering around 2^63 (constant-pool load, subtract,
; convert, xor the sign bit back in when a >= 2^63).
define i64 @convert_float_to_u64(float %a) nounwind {
; LA32F-LABEL: convert_float_to_u64:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl __fixunssfdi
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_float_to_u64:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl __fixunssfdi
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_float_to_u64:
; LA64F:       # %bb.0:
; LA64F-NEXT:    pcalau12i $a0, .LCPI7_0
; LA64F-NEXT:    addi.d $a0, $a0, .LCPI7_0
; LA64F-NEXT:    fld.s $fa1, $a0, 0
; LA64F-NEXT:    fsub.s $fa2, $fa0, $fa1
; LA64F-NEXT:    ftintrz.w.s $fa2, $fa2
; LA64F-NEXT:    movfr2gr.s $a0, $fa2
; LA64F-NEXT:    lu52i.d $a1, $zero, -2048
; LA64F-NEXT:    xor $a0, $a0, $a1
; LA64F-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA64F-NEXT:    movcf2gr $a1, $fcc0
; LA64F-NEXT:    masknez $a0, $a0, $a1
; LA64F-NEXT:    ftintrz.w.s $fa0, $fa0
; LA64F-NEXT:    movfr2gr.s $a2, $fa0
; LA64F-NEXT:    maskeqz $a1, $a2, $a1
; LA64F-NEXT:    or $a0, $a1, $a0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_float_to_u64:
; LA64D:       # %bb.0:
; LA64D-NEXT:    pcalau12i $a0, .LCPI7_0
; LA64D-NEXT:    addi.d $a0, $a0, .LCPI7_0
; LA64D-NEXT:    fld.s $fa1, $a0, 0
; LA64D-NEXT:    fsub.s $fa2, $fa0, $fa1
; LA64D-NEXT:    ftintrz.l.s $fa2, $fa2
; LA64D-NEXT:    movfr2gr.d $a0, $fa2
; LA64D-NEXT:    lu52i.d $a1, $zero, -2048
; LA64D-NEXT:    xor $a0, $a0, $a1
; LA64D-NEXT:    fcmp.clt.s $fcc0, $fa0, $fa1
; LA64D-NEXT:    movcf2gr $a1, $fcc0
; LA64D-NEXT:    masknez $a0, $a0, $a1
; LA64D-NEXT:    ftintrz.l.s $fa0, $fa0
; LA64D-NEXT:    movfr2gr.d $a2, $fa0
; LA64D-NEXT:    maskeqz $a1, $a2, $a1
; LA64D-NEXT:    or $a0, $a1, $a0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = fptoui float %a to i64
  ret i64 %1
}

; sitofp i8 -> float; the argument arrives already sign-extended (signext).
define float @convert_i8_to_float(i8 signext %a) nounwind {
; LA32F-LABEL: convert_i8_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_i8_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_i8_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_i8_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = sitofp i8 %a to float
  ret float %1
}

; sitofp i16 -> float; the argument arrives already sign-extended (signext).
define float @convert_i16_to_float(i16 signext %a) nounwind {
; LA32F-LABEL: convert_i16_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_i16_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_i16_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_i16_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = sitofp i16 %a to float
  ret float %1
}

; sitofp i32 -> float. On LA64 the upper bits of the i32 argument are not
; guaranteed, so an addi.w is emitted first to sign-extend it.
define float @convert_i32_to_float(i32 %a) nounwind {
; LA32F-LABEL: convert_i32_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_i32_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_i32_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    addi.w $a0, $a0, 0
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_i32_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    addi.w $a0, $a0, 0
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = sitofp i32 %a to float
  ret float %1
}

; sitofp i64 -> float. LA32 calls the compiler-rt helper __floatdisf.
define float @convert_i64_to_float(i64 %a) nounwind {
; LA32F-LABEL: convert_i64_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl __floatdisf
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_i64_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl __floatdisf
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_i64_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_i64_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = sitofp i64 %a to float
  ret float %1
}

; uitofp i8 -> float; the zero-extended (zeroext) value is non-negative, so
; the signed ffint path is used.
define float @convert_u8_to_float(i8 zeroext %a) nounwind {
; LA32F-LABEL: convert_u8_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_u8_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_u8_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_u8_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = uitofp i8 %a to float
  ret float %1
}

; uitofp i16 -> float; the zero-extended (zeroext) value is non-negative, so
; the signed ffint path is used.
define float @convert_u16_to_float(i16 zeroext %a) nounwind {
; LA32F-LABEL: convert_u16_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_u16_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    ffint.s.w $fa0, $fa0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_u16_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_u16_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = uitofp i16 %a to float
  ret float %1
}

; uitofp i32 -> float. LA32F/LA64 use the halve-and-double trick (with the
; low bit ORed in to keep rounding correct) when the sign bit is set; LA32D
; builds a magic double (bias 0x43300000) on the stack and subtracts.
define float @convert_u32_to_float(i32 %a) nounwind {
; LA32F-LABEL: convert_u32_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    srli.w $a1, $a0, 1
; LA32F-NEXT:    andi $a2, $a0, 1
; LA32F-NEXT:    or $a1, $a2, $a1
; LA32F-NEXT:    movgr2fr.w $fa0, $a1
; LA32F-NEXT:    ffint.s.w $fa0, $fa0
; LA32F-NEXT:    fadd.s $fa0, $fa0, $fa0
; LA32F-NEXT:    slti $a1, $a0, 0
; LA32F-NEXT:    movgr2cf $fcc0, $a1
; LA32F-NEXT:    movgr2fr.w $fa1, $a0
; LA32F-NEXT:    ffint.s.w $fa1, $fa1
; LA32F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_u32_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    addi.w $a1, $sp, 8
; LA32D-NEXT:    ori $a1, $a1, 4
; LA32D-NEXT:    lu12i.w $a2, 275200
; LA32D-NEXT:    st.w $a2, $a1, 0
; LA32D-NEXT:    st.w $a0, $sp, 8
; LA32D-NEXT:    pcalau12i $a0, .LCPI14_0
; LA32D-NEXT:    addi.w $a0, $a0, .LCPI14_0
; LA32D-NEXT:    fld.d $fa0, $a0, 0
; LA32D-NEXT:    fld.d $fa1, $sp, 8
; LA32D-NEXT:    fsub.d $fa0, $fa1, $fa0
; LA32D-NEXT:    fcvt.s.d $fa0, $fa0
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_u32_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    bstrpick.d $a1, $a0, 31, 1
; LA64F-NEXT:    andi $a2, $a0, 1
; LA64F-NEXT:    or $a1, $a2, $a1
; LA64F-NEXT:    movgr2fr.w $fa0, $a1
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    fadd.s $fa0, $fa0, $fa0
; LA64F-NEXT:    bstrpick.d $a0, $a0, 31, 0
; LA64F-NEXT:    slti $a1, $a0, 0
; LA64F-NEXT:    movgr2cf $fcc0, $a1
; LA64F-NEXT:    movgr2fr.w $fa1, $a0
; LA64F-NEXT:    ffint.s.w $fa1, $fa1
; LA64F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_u32_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    bstrpick.d $a1, $a0, 31, 1
; LA64D-NEXT:    andi $a2, $a0, 1
; LA64D-NEXT:    or $a1, $a2, $a1
; LA64D-NEXT:    movgr2fr.w $fa0, $a1
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    fadd.s $fa0, $fa0, $fa0
; LA64D-NEXT:    bstrpick.d $a0, $a0, 31, 0
; LA64D-NEXT:    slti $a1, $a0, 0
; LA64D-NEXT:    movgr2cf $fcc0, $a1
; LA64D-NEXT:    movgr2fr.w $fa1, $a0
; LA64D-NEXT:    ffint.s.w $fa1, $fa1
; LA64D-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = uitofp i32 %a to float
  ret float %1
}

; uitofp i64 -> float. LA32 calls the compiler-rt helper __floatundisf;
; LA64 uses the halve-and-double trick for values with the top bit set.
define float @convert_u64_to_float(i64 %a) nounwind {
; LA32F-LABEL: convert_u64_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    addi.w $sp, $sp, -16
; LA32F-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32F-NEXT:    bl __floatundisf
; LA32F-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32F-NEXT:    addi.w $sp, $sp, 16
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: convert_u64_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    addi.w $sp, $sp, -16
; LA32D-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32D-NEXT:    bl __floatundisf
; LA32D-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32D-NEXT:    addi.w $sp, $sp, 16
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: convert_u64_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    srli.d $a1, $a0, 1
; LA64F-NEXT:    andi $a2, $a0, 1
; LA64F-NEXT:    or $a1, $a2, $a1
; LA64F-NEXT:    movgr2fr.w $fa0, $a1
; LA64F-NEXT:    ffint.s.w $fa0, $fa0
; LA64F-NEXT:    fadd.s $fa0, $fa0, $fa0
; LA64F-NEXT:    slti $a1, $a0, 0
; LA64F-NEXT:    movgr2cf $fcc0, $a1
; LA64F-NEXT:    movgr2fr.w $fa1, $a0
; LA64F-NEXT:    ffint.s.w $fa1, $fa1
; LA64F-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: convert_u64_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    srli.d $a1, $a0, 1
; LA64D-NEXT:    andi $a2, $a0, 1
; LA64D-NEXT:    or $a1, $a2, $a1
; LA64D-NEXT:    movgr2fr.w $fa0, $a1
; LA64D-NEXT:    ffint.s.w $fa0, $fa0
; LA64D-NEXT:    fadd.s $fa0, $fa0, $fa0
; LA64D-NEXT:    slti $a1, $a0, 0
; LA64D-NEXT:    movgr2cf $fcc0, $a1
; LA64D-NEXT:    movgr2fr.w $fa1, $a0
; LA64D-NEXT:    ffint.s.w $fa1, $fa1
; LA64D-NEXT:    fsel $fa0, $fa1, $fa0, $fcc0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = uitofp i64 %a to float
  ret float %1
}

; bitcast float -> i32: a single FPR-to-GPR move on every target.
define i32 @bitcast_float_to_i32(float %a) nounwind {
; LA32F-LABEL: bitcast_float_to_i32:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movfr2gr.s $a0, $fa0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: bitcast_float_to_i32:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movfr2gr.s $a0, $fa0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: bitcast_float_to_i32:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movfr2gr.s $a0, $fa0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: bitcast_float_to_i32:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movfr2gr.s $a0, $fa0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = bitcast float %a to i32
  ret i32 %1
}

; bitcast i32 -> float: a single GPR-to-FPR move on every target.
define float @bitcast_i32_to_float(i32 %a) nounwind {
; LA32F-LABEL: bitcast_i32_to_float:
; LA32F:       # %bb.0:
; LA32F-NEXT:    movgr2fr.w $fa0, $a0
; LA32F-NEXT:    jirl $zero, $ra, 0
;
; LA32D-LABEL: bitcast_i32_to_float:
; LA32D:       # %bb.0:
; LA32D-NEXT:    movgr2fr.w $fa0, $a0
; LA32D-NEXT:    jirl $zero, $ra, 0
;
; LA64F-LABEL: bitcast_i32_to_float:
; LA64F:       # %bb.0:
; LA64F-NEXT:    movgr2fr.w $fa0, $a0
; LA64F-NEXT:    jirl $zero, $ra, 0
;
; LA64D-LABEL: bitcast_i32_to_float:
; LA64D:       # %bb.0:
; LA64D-NEXT:    movgr2fr.w $fa0, $a0
; LA64D-NEXT:    jirl $zero, $ra, 0
  %1 = bitcast i32 %a to float
  ret float %1
}
