; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

define float @fcvt_s_d(double %a) nounwind {
; CHECKIFD-LABEL: fcvt_s_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.s.d fa0, fa0
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_s_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __truncdfsf2@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_s_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __truncdfsf2@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = fptrunc double %a to float
  ret float %1
}

define double @fcvt_d_s(float %a) nounwind {
; CHECKIFD-LABEL: fcvt_d_s:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.s fa0, fa0
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_s:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __extendsfdf2@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_s:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __extendsfdf2@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = fpext float %a to double
  ret double %1
}

define i32 @fcvt_w_d(double %a) nounwind {
; CHECKIFD-LABEL: fcvt_w_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rtz
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_w_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixdfsi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_w_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixdfsi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = fptosi double %a to i32
  ret i32 %1
}

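; The *_sat tests below exercise the llvm.fpto{si,ui}.sat.* intrinsics, which
; clamp out-of-range inputs to the destination type's minimum/maximum and
; return 0 for NaN rather than producing poison. For the word-sized results,
; the hard-float (IFD) configurations only need the explicit feq.d check for
; the NaN case, since the RISC-V fcvt instructions already saturate
; out-of-range values themselves.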
define i32 @fcvt_w_d_sat(double %a) nounwind {
; CHECKIFD-LABEL: fcvt_w_d_sat:
; CHECKIFD: # %bb.0: # %start
; CHECKIFD-NEXT: feq.d a0, fa0, fa0
; CHECKIFD-NEXT: beqz a0, .LBB3_2
; CHECKIFD-NEXT: # %bb.1:
; CHECKIFD-NEXT: fcvt.w.d a0, fa0, rtz
; CHECKIFD-NEXT: .LBB3_2: # %start
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_w_d_sat:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 794112
; RV32I-NEXT: li s0, 0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: call __gedf2@plt
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call __fixdfsi@plt
; RV32I-NEXT: lui s5, 524288
; RV32I-NEXT: lui s4, 524288
; RV32I-NEXT: blt s3, s0, .LBB3_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: .LBB3_2: # %start
; RV32I-NEXT: lui a0, 269824
; RV32I-NEXT: addi a3, a0, -1
; RV32I-NEXT: lui a2, 1047552
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call __gtdf2@plt
; RV32I-NEXT: bge s0, a0, .LBB3_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: addi s4, s5, -1
; RV32I-NEXT: .LBB3_4: # %start
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: mv a3, s1
; RV32I-NEXT: call __unorddf2@plt
; RV32I-NEXT: bne a0, s0, .LBB3_6
; RV32I-NEXT: # %bb.5: # %start
; RV32I-NEXT: mv s0, s4
; RV32I-NEXT: .LBB3_6: # %start
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_w_d_sat:
; RV64I: # %bb.0: # %start
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a0, -497
; RV64I-NEXT: slli a1, a0, 53
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __gedf2@plt
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __fixdfdi@plt
; RV64I-NEXT: li s1, 0
; RV64I-NEXT: lui s4, 524288
; RV64I-NEXT: lui s3, 524288
; RV64I-NEXT: bltz s2, .LBB3_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: .LBB3_2: # %start
; RV64I-NEXT: li a0, 527
; RV64I-NEXT: slli a0, a0, 31
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 22
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __gtdf2@plt
; RV64I-NEXT: bge s1, a0, .LBB3_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: addiw s3, s4, -1
; RV64I-NEXT: .LBB3_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call __unorddf2@plt
; RV64I-NEXT: bne a0, s1, .LBB3_6
; RV64I-NEXT: # %bb.5: # %start
; RV64I-NEXT: mv s1, s3
; RV64I-NEXT: .LBB3_6: # %start
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s4, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
start:
  %0 = tail call i32 @llvm.fptosi.sat.i32.f64(double %a)
  ret i32 %0
}
declare i32 @llvm.fptosi.sat.i32.f64(double)

; For RV64D, fcvt.lu.d is semantically equivalent to fcvt.wu.d in this case
; because fptoui will produce poison if the result doesn't fit into an i32.
define i32 @fcvt_wu_d(double %a) nounwind {
; CHECKIFD-LABEL: fcvt_wu_d:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfsi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_wu_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfsi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = fptoui double %a to i32
  ret i32 %1
}

; Test where the fptoui has multiple uses, one of which causes a sext to be
; inserted on RV64.
define i32 @fcvt_wu_d_multiple_use(double %x, i32* %y) nounwind {
; CHECKIFD-LABEL: fcvt_wu_d_multiple_use:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.wu.d a1, fa0, rtz
; CHECKIFD-NEXT: li a0, 1
; CHECKIFD-NEXT: beqz a1, .LBB5_2
; CHECKIFD-NEXT: # %bb.1:
; CHECKIFD-NEXT: mv a0, a1
; CHECKIFD-NEXT: .LBB5_2:
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_d_multiple_use:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfsi@plt
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: li a0, 1
; RV32I-NEXT: beqz a1, .LBB5_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: .LBB5_2:
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_wu_d_multiple_use:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfsi@plt
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, 1
; RV64I-NEXT: beqz a1, .LBB5_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %a = fptoui double %x to i32
  %b = icmp eq i32 %a, 0
  %c = select i1 %b, i32 1, i32 %a
  ret i32 %c
}

define i32 @fcvt_wu_d_sat(double %a) nounwind {
; CHECKIFD-LABEL: fcvt_wu_d_sat:
; CHECKIFD: # %bb.0: # %start
; CHECKIFD-NEXT: feq.d a0, fa0, fa0
; CHECKIFD-NEXT: beqz a0, .LBB6_2
; CHECKIFD-NEXT: # %bb.1:
; CHECKIFD-NEXT: fcvt.wu.d a0, fa0, rtz
; CHECKIFD-NEXT: .LBB6_2: # %start
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_d_sat:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a0, 270080
; RV32I-NEXT: addi a3, a0, -1
; RV32I-NEXT: lui a2, 1048064
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: call __gtdf2@plt
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __gedf2@plt
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call __fixunsdfsi@plt
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: bltz s3, .LBB6_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: .LBB6_2: # %start
; RV32I-NEXT: li a0, -1
; RV32I-NEXT: bgtz s0, .LBB6_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: .LBB6_4: # %start
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_wu_d_sat:
; RV64I: # %bb.0: # %start
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __gedf2@plt
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __fixunsdfdi@plt
; RV64I-NEXT: li s1, 0
; RV64I-NEXT: bltz s2, .LBB6_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: .LBB6_2: # %start
; RV64I-NEXT: li a0, 1055
; RV64I-NEXT: slli a0, a0, 31
; RV64I-NEXT: addi a0, a0, -1
; RV64I-NEXT: slli a1, a0, 21
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __gtdf2@plt
; RV64I-NEXT: blez a0, .LBB6_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: li a0, -1
; RV64I-NEXT: srli s1, a0, 32
; RV64I-NEXT: .LBB6_4: # %start
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
start:
  %0 = tail call i32 @llvm.fptoui.sat.i32.f64(double %a)
  ret i32 %0
}
declare i32 @llvm.fptoui.sat.i32.f64(double)

define double @fcvt_d_w(i32 %a) nounwind {
; CHECKIFD-LABEL: fcvt_d_w:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_w:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_w:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: call __floatsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = sitofp i32 %a to double
  ret double %1
}

define double @fcvt_d_w_load(i32* %p) nounwind {
; CHECKIFD-LABEL: fcvt_d_w_load:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: lw a0, 0(a0)
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_w_load:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
; RV32I-NEXT: call __floatsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_w_load:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: call __floatsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %a = load i32, i32* %p
  %1 = sitofp i32 %a to double
  ret double %1
}

define double @fcvt_d_wu(i32 %a) nounwind {
; CHECKIFD-LABEL: fcvt_d_wu:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_wu:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_wu:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: call __floatunsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = uitofp i32 %a to double
  ret double %1
}

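; In the load variants the conversion consumes the loaded value directly;
; note that RV64IFD selects a zero-extending lwu for the unsigned case below.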
define double @fcvt_d_wu_load(i32* %p) nounwind {
; RV32IFD-LABEL: fcvt_d_wu_load:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: lw a0, 0(a0)
; RV32IFD-NEXT: fcvt.d.wu fa0, a0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_d_wu_load:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: lwu a0, 0(a0)
; RV64IFD-NEXT: fcvt.d.wu fa0, a0
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_wu_load:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 0(a0)
; RV32I-NEXT: call __floatunsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_wu_load:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 0(a0)
; RV64I-NEXT: call __floatunsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %a = load i32, i32* %p
  %1 = uitofp i32 %a to double
  ret double %1
}

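; The i64 <-> double conversions below need libcalls (__fixdfdi, __fixunsdfdi,
; __floatdidf, __floatundidf) on RV32 even with the D extension, because the
; 64-bit fcvt.l.d/fcvt.lu.d/fcvt.d.l/fcvt.d.lu forms only exist on RV64.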
define i64 @fcvt_l_d(double %a) nounwind {
; RV32IFD-LABEL: fcvt_l_d:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __fixdfdi@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_l_d:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_l_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixdfdi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_l_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixdfdi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = fptosi double %a to i64
  ret i64 %1
}

define i64 @fcvt_l_d_sat(double %a) nounwind {
; RV32IFD-LABEL: fcvt_l_d_sat:
; RV32IFD: # %bb.0: # %start
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: lui a0, %hi(.LCPI12_0)
; RV32IFD-NEXT: fld ft0, %lo(.LCPI12_0)(a0)
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fle.d s0, ft0, fa0
; RV32IFD-NEXT: call __fixdfdi@plt
; RV32IFD-NEXT: mv a2, a0
; RV32IFD-NEXT: bnez s0, .LBB12_2
; RV32IFD-NEXT: # %bb.1: # %start
; RV32IFD-NEXT: li a2, 0
; RV32IFD-NEXT: .LBB12_2: # %start
; RV32IFD-NEXT: lui a0, %hi(.LCPI12_1)
; RV32IFD-NEXT: fld ft0, %lo(.LCPI12_1)(a0)
; RV32IFD-NEXT: flt.d a3, ft0, fs0
; RV32IFD-NEXT: li a0, -1
; RV32IFD-NEXT: beqz a3, .LBB12_9
; RV32IFD-NEXT: # %bb.3: # %start
; RV32IFD-NEXT: feq.d a2, fs0, fs0
; RV32IFD-NEXT: beqz a2, .LBB12_10
; RV32IFD-NEXT: .LBB12_4: # %start
; RV32IFD-NEXT: lui a4, 524288
; RV32IFD-NEXT: beqz s0, .LBB12_11
; RV32IFD-NEXT: .LBB12_5: # %start
; RV32IFD-NEXT: bnez a3, .LBB12_12
; RV32IFD-NEXT: .LBB12_6: # %start
; RV32IFD-NEXT: bnez a2, .LBB12_8
; RV32IFD-NEXT: .LBB12_7: # %start
; RV32IFD-NEXT: li a1, 0
; RV32IFD-NEXT: .LBB12_8: # %start
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB12_9: # %start
; RV32IFD-NEXT: mv a0, a2
; RV32IFD-NEXT: feq.d a2, fs0, fs0
; RV32IFD-NEXT: bnez a2, .LBB12_4
; RV32IFD-NEXT: .LBB12_10: # %start
; RV32IFD-NEXT: li a0, 0
; RV32IFD-NEXT: lui a4, 524288
; RV32IFD-NEXT: bnez s0, .LBB12_5
; RV32IFD-NEXT: .LBB12_11: # %start
; RV32IFD-NEXT: lui a1, 524288
; RV32IFD-NEXT: beqz a3, .LBB12_6
; RV32IFD-NEXT: .LBB12_12:
; RV32IFD-NEXT: addi a1, a4, -1
; RV32IFD-NEXT: beqz a2, .LBB12_7
; RV32IFD-NEXT: j .LBB12_8
;
; RV64IFD-LABEL: fcvt_l_d_sat:
; RV64IFD: # %bb.0: # %start
; RV64IFD-NEXT: feq.d a0, fa0, fa0
; RV64IFD-NEXT: beqz a0, .LBB12_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT: .LBB12_2: # %start
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_l_d_sat:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -48
; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: lui a0, 278016
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: li a2, -1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a3, s3
; RV32I-NEXT: call __gtdf2@plt
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: lui a3, 802304
; RV32I-NEXT: li s2, 0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: call __gedf2@plt
; RV32I-NEXT: mv s6, a0
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: call __fixdfdi@plt
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: blt s6, s2, .LBB12_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: .LBB12_2: # %start
; RV32I-NEXT: li s6, -1
; RV32I-NEXT: blt s2, s4, .LBB12_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: mv s6, a1
; RV32I-NEXT: .LBB12_4: # %start
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
; RV32I-NEXT: call __unorddf2@plt
; RV32I-NEXT: mv s4, s2
; RV32I-NEXT: bne a0, s2, .LBB12_6
; RV32I-NEXT: # %bb.5: # %start
; RV32I-NEXT: mv s4, s6
; RV32I-NEXT: .LBB12_6: # %start
; RV32I-NEXT: lui a3, 802304
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: call __gedf2@plt
; RV32I-NEXT: lui s7, 524288
; RV32I-NEXT: lui s6, 524288
; RV32I-NEXT: blt a0, s2, .LBB12_8
; RV32I-NEXT: # %bb.7: # %start
; RV32I-NEXT: mv s6, s5
; RV32I-NEXT: .LBB12_8: # %start
; RV32I-NEXT: li a2, -1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a3, s3
; RV32I-NEXT: call __gtdf2@plt
; RV32I-NEXT: bge s2, a0, .LBB12_10
; RV32I-NEXT: # %bb.9:
; RV32I-NEXT: addi s6, s7, -1
; RV32I-NEXT: .LBB12_10: # %start
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: mv a2, s1
; RV32I-NEXT: mv a3, s0
; RV32I-NEXT: call __unorddf2@plt
; RV32I-NEXT: bne a0, s2, .LBB12_12
; RV32I-NEXT: # %bb.11: # %start
; RV32I-NEXT: mv s2, s6
; RV32I-NEXT: .LBB12_12: # %start
; RV32I-NEXT: mv a0, s4
; RV32I-NEXT: mv a1, s2
; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 48
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_l_d_sat:
; RV64I: # %bb.0: # %start
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a0, -481
; RV64I-NEXT: slli a1, a0, 53
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __gedf2@plt
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __fixdfdi@plt
; RV64I-NEXT: li s1, 0
; RV64I-NEXT: li s4, -1
; RV64I-NEXT: bltz s3, .LBB12_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: j .LBB12_3
; RV64I-NEXT: .LBB12_2:
; RV64I-NEXT: slli s2, s4, 63
; RV64I-NEXT: .LBB12_3: # %start
; RV64I-NEXT: li a0, 543
; RV64I-NEXT: slli a0, a0, 53
; RV64I-NEXT: addi a1, a0, -1
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __gtdf2@plt
; RV64I-NEXT: bge s1, a0, .LBB12_5
; RV64I-NEXT: # %bb.4:
; RV64I-NEXT: srli s2, s4, 1
; RV64I-NEXT: .LBB12_5: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call __unorddf2@plt
; RV64I-NEXT: bne a0, s1, .LBB12_7
; RV64I-NEXT: # %bb.6: # %start
; RV64I-NEXT: mv s1, s2
; RV64I-NEXT: .LBB12_7: # %start
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s4, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
start:
  %0 = tail call i64 @llvm.fptosi.sat.i64.f64(double %a)
  ret i64 %0
}
declare i64 @llvm.fptosi.sat.i64.f64(double)

define i64 @fcvt_lu_d(double %a) nounwind {
; RV32IFD-LABEL: fcvt_lu_d:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __fixunsdfdi@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_lu_d:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_lu_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfdi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_lu_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfdi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = fptoui double %a to i64
  ret i64 %1
}

define i64 @fcvt_lu_d_sat(double %a) nounwind {
; RV32IFD-LABEL: fcvt_lu_d_sat:
; RV32IFD: # %bb.0: # %start
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT: fmv.d fs0, fa0
; RV32IFD-NEXT: fcvt.d.w ft0, zero
; RV32IFD-NEXT: fle.d s0, ft0, fa0
; RV32IFD-NEXT: call __fixunsdfdi@plt
; RV32IFD-NEXT: mv a3, a0
; RV32IFD-NEXT: bnez s0, .LBB14_2
; RV32IFD-NEXT: # %bb.1: # %start
; RV32IFD-NEXT: li a3, 0
; RV32IFD-NEXT: .LBB14_2: # %start
; RV32IFD-NEXT: lui a0, %hi(.LCPI14_0)
; RV32IFD-NEXT: fld ft0, %lo(.LCPI14_0)(a0)
; RV32IFD-NEXT: flt.d a4, ft0, fs0
; RV32IFD-NEXT: li a2, -1
; RV32IFD-NEXT: li a0, -1
; RV32IFD-NEXT: beqz a4, .LBB14_7
; RV32IFD-NEXT: # %bb.3: # %start
; RV32IFD-NEXT: beqz s0, .LBB14_8
; RV32IFD-NEXT: .LBB14_4: # %start
; RV32IFD-NEXT: bnez a4, .LBB14_6
; RV32IFD-NEXT: .LBB14_5: # %start
; RV32IFD-NEXT: mv a2, a1
; RV32IFD-NEXT: .LBB14_6: # %start
; RV32IFD-NEXT: mv a1, a2
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB14_7: # %start
; RV32IFD-NEXT: mv a0, a3
; RV32IFD-NEXT: bnez s0, .LBB14_4
; RV32IFD-NEXT: .LBB14_8: # %start
; RV32IFD-NEXT: li a1, 0
; RV32IFD-NEXT: beqz a4, .LBB14_5
; RV32IFD-NEXT: j .LBB14_6
;
; RV64IFD-LABEL: fcvt_lu_d_sat:
; RV64IFD: # %bb.0: # %start
; RV64IFD-NEXT: feq.d a0, fa0, fa0
; RV64IFD-NEXT: beqz a0, .LBB14_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz
; RV64IFD-NEXT: .LBB14_2: # %start
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_lu_d_sat:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s5, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s6, 0(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a0, 278272
; RV32I-NEXT: addi s3, a0, -1
; RV32I-NEXT: li a2, -1
; RV32I-NEXT: li s0, -1
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a3, s3
; RV32I-NEXT: call __gtdf2@plt
; RV32I-NEXT: mv s6, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __gedf2@plt
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call __fixunsdfdi@plt
; RV32I-NEXT: mv s5, a1
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: bltz s4, .LBB14_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: .LBB14_2: # %start
; RV32I-NEXT: li s4, -1
; RV32I-NEXT: bgtz s6, .LBB14_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: mv s4, a1
; RV32I-NEXT: .LBB14_4: # %start
; RV32I-NEXT: li a2, -1
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a3, s3
; RV32I-NEXT: call __gtdf2@plt
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __gedf2@plt
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: bltz a0, .LBB14_6
; RV32I-NEXT: # %bb.5: # %start
; RV32I-NEXT: mv a1, s5
; RV32I-NEXT: .LBB14_6: # %start
; RV32I-NEXT: bgtz s3, .LBB14_8
; RV32I-NEXT: # %bb.7: # %start
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: .LBB14_8: # %start
; RV32I-NEXT: mv a0, s4
; RV32I-NEXT: mv a1, s0
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s5, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s6, 0(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_lu_d_sat:
; RV64I: # %bb.0: # %start
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __gedf2@plt
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __fixunsdfdi@plt
; RV64I-NEXT: li s2, 0
; RV64I-NEXT: bltz s1, .LBB14_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: .LBB14_2: # %start
; RV64I-NEXT: li a0, 1087
; RV64I-NEXT: slli a0, a0, 52
; RV64I-NEXT: addi a1, a0, -1
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __gtdf2@plt
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, -1
; RV64I-NEXT: bgtz a1, .LBB14_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: mv a0, s2
; RV64I-NEXT: .LBB14_4: # %start
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
start:
  %0 = tail call i64 @llvm.fptoui.sat.i64.f64(double %a)
  ret i64 %0
}
declare i64 @llvm.fptoui.sat.i64.f64(double)

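; Bitcasts between i64 and double can use fmv.x.d/fmv.d.x only on RV64; on
; RV32IFD the value has to be moved through a stack slot instead, as the
; checks below show.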
define i64 @fmv_x_d(double %a, double %b) nounwind {
; RV32IFD-LABEL: fmv_x_d:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: fadd.d ft0, fa0, fa1
; RV32IFD-NEXT: fsd ft0, 8(sp)
; RV32IFD-NEXT: lw a0, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fmv_x_d:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fadd.d ft0, fa0, fa1
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fmv_x_d:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __adddf3@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fmv_x_d:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __adddf3@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = fadd double %a, %b
  %2 = bitcast double %1 to i64
  ret i64 %2
}

define double @fcvt_d_l(i64 %a) nounwind {
; RV32IFD-LABEL: fcvt_d_l:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __floatdidf@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_d_l:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fcvt.d.l fa0, a0
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_l:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatdidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_l:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatdidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = sitofp i64 %a to double
  ret double %1
}

define double @fcvt_d_lu(i64 %a) nounwind {
; RV32IFD-LABEL: fcvt_d_lu:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call __floatundidf@plt
; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_d_lu:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fcvt.d.lu fa0, a0
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_lu:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatundidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_lu:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatundidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = uitofp i64 %a to double
  ret double %1
}

define double @fmv_d_x(i64 %a, i64 %b) nounwind {
; Ensure fmv.d.x is generated even for a soft-double calling convention
; RV32IFD-LABEL: fmv_d_x:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi sp, sp, -16
; RV32IFD-NEXT: sw a3, 4(sp)
; RV32IFD-NEXT: sw a2, 0(sp)
; RV32IFD-NEXT: sw a1, 12(sp)
; RV32IFD-NEXT: sw a0, 8(sp)
; RV32IFD-NEXT: fld ft0, 0(sp)
; RV32IFD-NEXT: fld ft1, 8(sp)
; RV32IFD-NEXT: fadd.d fa0, ft1, ft0
; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fmv_d_x:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fmv.d.x ft1, a1
; RV64IFD-NEXT: fadd.d fa0, ft0, ft1
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fmv_d_x:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __adddf3@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fmv_d_x:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __adddf3@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = bitcast i64 %a to double
  %2 = bitcast i64 %b to double
  %3 = fadd double %1, %2
  ret double %3
}

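; For the narrow integer sources below, the signext/zeroext parameter
; attributes guarantee the value already arrives extended, so the i8 and i16
; cases reuse the plain fcvt.d.w/fcvt.d.wu lowering without extra extension
; instructions.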
define double @fcvt_d_w_i8(i8 signext %a) nounwind {
; CHECKIFD-LABEL: fcvt_d_w_i8:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_w_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_w_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = sitofp i8 %a to double
  ret double %1
}

define double @fcvt_d_wu_i8(i8 zeroext %a) nounwind {
; CHECKIFD-LABEL: fcvt_d_wu_i8:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_wu_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_wu_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatunsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = uitofp i8 %a to double
  ret double %1
}

define double @fcvt_d_w_i16(i16 signext %a) nounwind {
; CHECKIFD-LABEL: fcvt_d_w_i16:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.w fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_w_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_w_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = sitofp i16 %a to double
  ret double %1
}

define double @fcvt_d_wu_i16(i16 zeroext %a) nounwind {
; CHECKIFD-LABEL: fcvt_d_wu_i16:
; CHECKIFD: # %bb.0:
; CHECKIFD-NEXT: fcvt.d.wu fa0, a0
; CHECKIFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_wu_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __floatunsidf@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_wu_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __floatunsidf@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = uitofp i16 %a to double
  ret double %1
}

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_d_w_demanded_bits(i32 signext %0, double* %1) nounwind {
; RV32IFD-LABEL: fcvt_d_w_demanded_bits:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi a0, a0, 1
; RV32IFD-NEXT: fcvt.d.w ft0, a0
; RV32IFD-NEXT: fsd ft0, 0(a1)
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_d_w_demanded_bits:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addiw a0, a0, 1
; RV64IFD-NEXT: fcvt.d.w ft0, a0
; RV64IFD-NEXT: fsd ft0, 0(a1)
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_w_demanded_bits:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call __floatsidf@plt
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_w_demanded_bits:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __floatsidf@plt
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %3 = add i32 %0, 1
  %4 = sitofp i32 %3 to double
  store double %4, double* %1, align 8
  ret i32 %3
}

; Make sure we select W version of addi on RV64.
define signext i32 @fcvt_d_wu_demanded_bits(i32 signext %0, double* %1) nounwind {
; RV32IFD-LABEL: fcvt_d_wu_demanded_bits:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: addi a0, a0, 1
; RV32IFD-NEXT: fcvt.d.wu ft0, a0
; RV32IFD-NEXT: fsd ft0, 0(a1)
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_d_wu_demanded_bits:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: addiw a0, a0, 1
; RV64IFD-NEXT: fcvt.d.wu ft0, a0
; RV64IFD-NEXT: fsd ft0, 0(a1)
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_d_wu_demanded_bits:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s0, a1
; RV32I-NEXT: addi s1, a0, 1
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: call __floatunsidf@plt
; RV32I-NEXT: sw a1, 4(s0)
; RV32I-NEXT: sw a0, 0(s0)
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_d_wu_demanded_bits:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a1
; RV64I-NEXT: addiw s1, a0, 1
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: call __floatunsidf@plt
; RV64I-NEXT: sd a0, 0(s0)
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
  %3 = add i32 %0, 1
  %4 = uitofp i32 %3 to double
  store double %4, double* %1, align 8
  ret i32 %3
}

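; The remaining tests convert to i16 and i8. For the saturating variants the
; hard-float configurations clamp with fmax.d/fmin.d against constant-pool
; bounds before the narrowing fcvt (plus the usual NaN check for the signed
; cases), while the soft-float RV32I/RV64I configurations open-code the
; clamping around the libcalls.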
define signext i16 @fcvt_w_s_i16(double %a) nounwind {
; RV32IFD-LABEL: fcvt_w_s_i16:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_w_s_i16:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixdfsi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_w_s_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixdfdi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = fptosi double %a to i16
  ret i16 %1
}

define signext i16 @fcvt_w_s_sat_i16(double %a) nounwind {
; RV32IFD-LABEL: fcvt_w_s_sat_i16:
; RV32IFD: # %bb.0: # %start
; RV32IFD-NEXT: feq.d a0, fa0, fa0
; RV32IFD-NEXT: beqz a0, .LBB26_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: lui a0, %hi(.LCPI26_0)
; RV32IFD-NEXT: fld ft0, %lo(.LCPI26_0)(a0)
; RV32IFD-NEXT: lui a0, %hi(.LCPI26_1)
; RV32IFD-NEXT: fld ft1, %lo(.LCPI26_1)(a0)
; RV32IFD-NEXT: fmax.d ft0, fa0, ft0
; RV32IFD-NEXT: fmin.d ft0, ft0, ft1
; RV32IFD-NEXT: fcvt.w.d a0, ft0, rtz
; RV32IFD-NEXT: .LBB26_2: # %start
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_w_s_sat_i16:
; RV64IFD: # %bb.0: # %start
; RV64IFD-NEXT: feq.d a0, fa0, fa0
; RV64IFD-NEXT: beqz a0, .LBB26_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: lui a0, %hi(.LCPI26_0)
; RV64IFD-NEXT: fld ft0, %lo(.LCPI26_0)(a0)
; RV64IFD-NEXT: lui a0, %hi(.LCPI26_1)
; RV64IFD-NEXT: fld ft1, %lo(.LCPI26_1)(a0)
; RV64IFD-NEXT: fmax.d ft0, fa0, ft0
; RV64IFD-NEXT: fmin.d ft0, ft0, ft1
; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz
; RV64IFD-NEXT: .LBB26_2: # %start
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_sat_i16:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 790016
; RV32I-NEXT: li s0, 0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: call __gedf2@plt
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call __fixdfsi@plt
; RV32I-NEXT: lui s4, 1048568
; RV32I-NEXT: blt s3, s0, .LBB26_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: .LBB26_2: # %start
; RV32I-NEXT: lui a0, 265728
; RV32I-NEXT: addi a3, a0, -64
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a2, s0
; RV32I-NEXT: call __gtdf2@plt
; RV32I-NEXT: bge s0, a0, .LBB26_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: lui a0, 8
; RV32I-NEXT: addi s4, a0, -1
; RV32I-NEXT: .LBB26_4: # %start
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: mv a3, s1
; RV32I-NEXT: call __unorddf2@plt
; RV32I-NEXT: bne a0, s0, .LBB26_6
; RV32I-NEXT: # %bb.5: # %start
; RV32I-NEXT: mv s0, s4
; RV32I-NEXT: .LBB26_6: # %start
; RV32I-NEXT: slli a0, s0, 16
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_w_s_sat_i16:
; RV64I: # %bb.0: # %start
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a0, -505
; RV64I-NEXT: slli a1, a0, 53
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __gedf2@plt
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __fixdfdi@plt
; RV64I-NEXT: li s2, 0
; RV64I-NEXT: lui s3, 1048568
; RV64I-NEXT: bltz s1, .LBB26_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: mv s3, a0
; RV64I-NEXT: .LBB26_2: # %start
; RV64I-NEXT: lui a0, 4152
; RV64I-NEXT: addiw a0, a0, -1
; RV64I-NEXT: slli a1, a0, 38
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __gtdf2@plt
; RV64I-NEXT: bge s2, a0, .LBB26_4
; RV64I-NEXT: # %bb.3:
; RV64I-NEXT: lui a0, 8
; RV64I-NEXT: addiw s3, a0, -1
; RV64I-NEXT: .LBB26_4: # %start
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: mv a1, s0
; RV64I-NEXT: call __unorddf2@plt
; RV64I-NEXT: bne a0, s2, .LBB26_6
; RV64I-NEXT: # %bb.5: # %start
; RV64I-NEXT: mv s2, s3
; RV64I-NEXT: .LBB26_6: # %start
; RV64I-NEXT: slli a0, s2, 48
; RV64I-NEXT: srai a0, a0, 48
; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 48
; RV64I-NEXT: ret
start:
  %0 = tail call i16 @llvm.fptosi.sat.i16.f64(double %a)
  ret i16 %0
}
declare i16 @llvm.fptosi.sat.i16.f64(double)

define zeroext i16 @fcvt_wu_s_i16(double %a) nounwind {
; RV32IFD-LABEL: fcvt_wu_s_i16:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_wu_s_i16:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_s_i16:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixunsdfsi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_wu_s_i16:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixunsdfdi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = fptoui double %a to i16
  ret i16 %1
}

define zeroext i16 @fcvt_wu_s_sat_i16(double %a) nounwind {
; RV32IFD-LABEL: fcvt_wu_s_sat_i16:
; RV32IFD: # %bb.0: # %start
; RV32IFD-NEXT: lui a0, %hi(.LCPI28_0)
; RV32IFD-NEXT: fld ft0, %lo(.LCPI28_0)(a0)
; RV32IFD-NEXT: fcvt.d.w ft1, zero
; RV32IFD-NEXT: fmax.d ft1, fa0, ft1
; RV32IFD-NEXT: fmin.d ft0, ft1, ft0
; RV32IFD-NEXT: fcvt.wu.d a0, ft0, rtz
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_wu_s_sat_i16:
; RV64IFD: # %bb.0: # %start
; RV64IFD-NEXT: lui a0, %hi(.LCPI28_0)
; RV64IFD-NEXT: fld ft0, %lo(.LCPI28_0)(a0)
; RV64IFD-NEXT: fmv.d.x ft1, zero
; RV64IFD-NEXT: fmax.d ft1, fa0, ft1
; RV64IFD-NEXT: fmin.d ft0, ft1, ft0
; RV64IFD-NEXT: fcvt.lu.d a0, ft0, rtz
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_wu_s_sat_i16:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a0, 265984
; RV32I-NEXT: addi a3, a0, -32
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: call __gtdf2@plt
; RV32I-NEXT: mv s0, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: li a3, 0
; RV32I-NEXT: call __gedf2@plt
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call __fixunsdfsi@plt
; RV32I-NEXT: li a1, 0
; RV32I-NEXT: bltz s3, .LBB28_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: .LBB28_2: # %start
; RV32I-NEXT: lui a0, 16
; RV32I-NEXT: addi a0, a0, -1
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: bgtz s0, .LBB28_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: mv a2, a1
; RV32I-NEXT: .LBB28_4: # %start
; RV32I-NEXT: and a0, a2, a0
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_wu_s_sat_i16:
; RV64I: # %bb.0: # %start
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a1, 0
; RV64I-NEXT: call __gedf2@plt
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __fixunsdfdi@plt
; RV64I-NEXT: li s2, 0
; RV64I-NEXT: bltz s1, .LBB28_2
; RV64I-NEXT: # %bb.1: # %start
; RV64I-NEXT: mv s2, a0
; RV64I-NEXT: .LBB28_2: # %start
; RV64I-NEXT: lui a0, 8312
; RV64I-NEXT: addiw a0, a0, -1
; RV64I-NEXT: slli a1, a0, 37
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __gtdf2@plt
; RV64I-NEXT: lui a1, 16
; RV64I-NEXT: addiw a1, a1, -1
; RV64I-NEXT: mv a2, a1
; RV64I-NEXT: bgtz a0, .LBB28_4
; RV64I-NEXT: # %bb.3: # %start
; RV64I-NEXT: mv a2, s2
; RV64I-NEXT: .LBB28_4: # %start
; RV64I-NEXT: and a0, a2, a1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
start:
  %0 = tail call i16 @llvm.fptoui.sat.i16.f64(double %a)
  ret i16 %0
}
declare i16 @llvm.fptoui.sat.i16.f64(double)

define signext i8 @fcvt_w_s_i8(double %a) nounwind {
; RV32IFD-LABEL: fcvt_w_s_i8:
; RV32IFD: # %bb.0:
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_w_s_i8:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_i8:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: call __fixdfsi@plt
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_w_s_i8:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: call __fixdfdi@plt
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
  %1 = fptosi double %a to i8
  ret i8 %1
}

define signext i8 @fcvt_w_s_sat_i8(double %a) nounwind {
; RV32IFD-LABEL: fcvt_w_s_sat_i8:
; RV32IFD: # %bb.0: # %start
; RV32IFD-NEXT: feq.d a0, fa0, fa0
; RV32IFD-NEXT: beqz a0, .LBB30_2
; RV32IFD-NEXT: # %bb.1:
; RV32IFD-NEXT: lui a0, %hi(.LCPI30_0)
; RV32IFD-NEXT: fld ft0, %lo(.LCPI30_0)(a0)
; RV32IFD-NEXT: lui a0, %hi(.LCPI30_1)
; RV32IFD-NEXT: fld ft1, %lo(.LCPI30_1)(a0)
; RV32IFD-NEXT: fmax.d ft0, fa0, ft0
; RV32IFD-NEXT: fmin.d ft0, ft0, ft1
; RV32IFD-NEXT: fcvt.w.d a0, ft0, rtz
; RV32IFD-NEXT: .LBB30_2: # %start
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: fcvt_w_s_sat_i8:
; RV64IFD: # %bb.0: # %start
; RV64IFD-NEXT: feq.d a0, fa0, fa0
; RV64IFD-NEXT: beqz a0, .LBB30_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: lui a0, %hi(.LCPI30_0)
; RV64IFD-NEXT: fld ft0, %lo(.LCPI30_0)(a0)
; RV64IFD-NEXT: lui a0, %hi(.LCPI30_1)
; RV64IFD-NEXT: fld ft1, %lo(.LCPI30_1)(a0)
; RV64IFD-NEXT: fmax.d ft0, fa0, ft0
; RV64IFD-NEXT: fmin.d ft0, ft0, ft1
; RV64IFD-NEXT: fcvt.l.d a0, ft0, rtz
; RV64IFD-NEXT: .LBB30_2: # %start
; RV64IFD-NEXT: ret
;
; RV32I-LABEL: fcvt_w_s_sat_i8:
; RV32I: # %bb.0: # %start
; RV32I-NEXT: addi sp, sp, -32
; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: mv s1, a1
; RV32I-NEXT: mv s2, a0
; RV32I-NEXT: lui a3, 787968
; RV32I-NEXT: li s0, 0
; RV32I-NEXT: li a2, 0
; RV32I-NEXT: call __gedf2@plt
; RV32I-NEXT: mv s3, a0
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: call __fixdfsi@plt
; RV32I-NEXT: li s4, -128
; RV32I-NEXT: blt s3, s0, .LBB30_2
; RV32I-NEXT: # %bb.1: # %start
; RV32I-NEXT: mv s4, a0
; RV32I-NEXT: .LBB30_2: # %start
; RV32I-NEXT: lui a3, 263676
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a2, s0
; RV32I-NEXT: call __gtdf2@plt
; RV32I-NEXT: li s3, 127
; RV32I-NEXT: blt s0, a0, .LBB30_4
; RV32I-NEXT: # %bb.3: # %start
; RV32I-NEXT: mv s3, s4
; RV32I-NEXT: .LBB30_4: # %start
; RV32I-NEXT: mv a0, s2
; RV32I-NEXT: mv a1, s1
; RV32I-NEXT: mv a2, s2
; RV32I-NEXT: mv a3, s1
; RV32I-NEXT: call __unorddf2@plt
; RV32I-NEXT: bne a0, s0, .LBB30_6
; RV32I-NEXT: # %bb.5: # %start
; RV32I-NEXT: mv s0, s3
; RV32I-NEXT: .LBB30_6: # %start
; RV32I-NEXT: slli a0, s0, 24
; RV32I-NEXT: srai a0, a0, 24
; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 32
; RV32I-NEXT: ret
;
; RV64I-LABEL: fcvt_w_s_sat_i8:
; RV64I: # %bb.0: # %start
; RV64I-NEXT: addi sp, sp, -48
; RV64I-NEXT: sd ra, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: mv s0, a0
; RV64I-NEXT: li a0, -509
; RV64I-NEXT: slli a1, a0, 53
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __gedf2@plt
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call __fixdfdi@plt
RV64I-NEXT: li s2, 0 1794; RV64I-NEXT: li s3, -128 1795; RV64I-NEXT: bltz s1, .LBB30_2 1796; RV64I-NEXT: # %bb.1: # %start 1797; RV64I-NEXT: mv s3, a0 1798; RV64I-NEXT: .LBB30_2: # %start 1799; RV64I-NEXT: lui a0, 65919 1800; RV64I-NEXT: slli a1, a0, 34 1801; RV64I-NEXT: mv a0, s0 1802; RV64I-NEXT: call __gtdf2@plt 1803; RV64I-NEXT: li s1, 127 1804; RV64I-NEXT: blt s2, a0, .LBB30_4 1805; RV64I-NEXT: # %bb.3: # %start 1806; RV64I-NEXT: mv s1, s3 1807; RV64I-NEXT: .LBB30_4: # %start 1808; RV64I-NEXT: mv a0, s0 1809; RV64I-NEXT: mv a1, s0 1810; RV64I-NEXT: call __unorddf2@plt 1811; RV64I-NEXT: bne a0, s2, .LBB30_6 1812; RV64I-NEXT: # %bb.5: # %start 1813; RV64I-NEXT: mv s2, s1 1814; RV64I-NEXT: .LBB30_6: # %start 1815; RV64I-NEXT: slli a0, s2, 56 1816; RV64I-NEXT: srai a0, a0, 56 1817; RV64I-NEXT: ld ra, 40(sp) # 8-byte Folded Reload 1818; RV64I-NEXT: ld s0, 32(sp) # 8-byte Folded Reload 1819; RV64I-NEXT: ld s1, 24(sp) # 8-byte Folded Reload 1820; RV64I-NEXT: ld s2, 16(sp) # 8-byte Folded Reload 1821; RV64I-NEXT: ld s3, 8(sp) # 8-byte Folded Reload 1822; RV64I-NEXT: addi sp, sp, 48 1823; RV64I-NEXT: ret 1824start: 1825 %0 = tail call i8 @llvm.fptosi.sat.i8.f64(double %a) 1826 ret i8 %0 1827} 1828declare i8 @llvm.fptosi.sat.i8.f64(double) 1829 1830define zeroext i8 @fcvt_wu_s_i8(double %a) nounwind { 1831; 1832; 1833; RV32IFD-LABEL: fcvt_wu_s_i8: 1834; RV32IFD: # %bb.0: 1835; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rtz 1836; RV32IFD-NEXT: ret 1837; 1838; RV64IFD-LABEL: fcvt_wu_s_i8: 1839; RV64IFD: # %bb.0: 1840; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz 1841; RV64IFD-NEXT: ret 1842; 1843; RV32I-LABEL: fcvt_wu_s_i8: 1844; RV32I: # %bb.0: 1845; RV32I-NEXT: addi sp, sp, -16 1846; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 1847; RV32I-NEXT: call __fixunsdfsi@plt 1848; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 1849; RV32I-NEXT: addi sp, sp, 16 1850; RV32I-NEXT: ret 1851; 1852; RV64I-LABEL: fcvt_wu_s_i8: 1853; RV64I: # %bb.0: 1854; RV64I-NEXT: addi sp, sp, -16 1855; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 1856; RV64I-NEXT: call __fixunsdfdi@plt 1857; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 1858; RV64I-NEXT: addi sp, sp, 16 1859; RV64I-NEXT: ret 1860 %1 = fptoui double %a to i8 1861 ret i8 %1 1862} 1863 1864define zeroext i8 @fcvt_wu_s_sat_i8(double %a) nounwind { 1865; 1866; 1867; RV32IFD-LABEL: fcvt_wu_s_sat_i8: 1868; RV32IFD: # %bb.0: # %start 1869; RV32IFD-NEXT: lui a0, %hi(.LCPI32_0) 1870; RV32IFD-NEXT: fld ft0, %lo(.LCPI32_0)(a0) 1871; RV32IFD-NEXT: fcvt.d.w ft1, zero 1872; RV32IFD-NEXT: fmax.d ft1, fa0, ft1 1873; RV32IFD-NEXT: fmin.d ft0, ft1, ft0 1874; RV32IFD-NEXT: fcvt.wu.d a0, ft0, rtz 1875; RV32IFD-NEXT: ret 1876; 1877; RV64IFD-LABEL: fcvt_wu_s_sat_i8: 1878; RV64IFD: # %bb.0: # %start 1879; RV64IFD-NEXT: lui a0, %hi(.LCPI32_0) 1880; RV64IFD-NEXT: fld ft0, %lo(.LCPI32_0)(a0) 1881; RV64IFD-NEXT: fmv.d.x ft1, zero 1882; RV64IFD-NEXT: fmax.d ft1, fa0, ft1 1883; RV64IFD-NEXT: fmin.d ft0, ft1, ft0 1884; RV64IFD-NEXT: fcvt.lu.d a0, ft0, rtz 1885; RV64IFD-NEXT: ret 1886; 1887; RV32I-LABEL: fcvt_wu_s_sat_i8: 1888; RV32I: # %bb.0: # %start 1889; RV32I-NEXT: addi sp, sp, -32 1890; RV32I-NEXT: sw ra, 28(sp) # 4-byte Folded Spill 1891; RV32I-NEXT: sw s0, 24(sp) # 4-byte Folded Spill 1892; RV32I-NEXT: sw s1, 20(sp) # 4-byte Folded Spill 1893; RV32I-NEXT: sw s2, 16(sp) # 4-byte Folded Spill 1894; RV32I-NEXT: sw s3, 12(sp) # 4-byte Folded Spill 1895; RV32I-NEXT: mv s1, a1 1896; RV32I-NEXT: mv s2, a0 1897; RV32I-NEXT: lui a3, 263934 1898; RV32I-NEXT: li a2, 0 1899; RV32I-NEXT: call 
__gtdf2@plt 1900; RV32I-NEXT: mv s0, a0 1901; RV32I-NEXT: mv a0, s2 1902; RV32I-NEXT: mv a1, s1 1903; RV32I-NEXT: li a2, 0 1904; RV32I-NEXT: li a3, 0 1905; RV32I-NEXT: call __gedf2@plt 1906; RV32I-NEXT: mv s3, a0 1907; RV32I-NEXT: mv a0, s2 1908; RV32I-NEXT: mv a1, s1 1909; RV32I-NEXT: call __fixunsdfsi@plt 1910; RV32I-NEXT: li a1, 0 1911; RV32I-NEXT: bltz s3, .LBB32_2 1912; RV32I-NEXT: # %bb.1: # %start 1913; RV32I-NEXT: mv a1, a0 1914; RV32I-NEXT: .LBB32_2: # %start 1915; RV32I-NEXT: li a0, 255 1916; RV32I-NEXT: bgtz s0, .LBB32_4 1917; RV32I-NEXT: # %bb.3: # %start 1918; RV32I-NEXT: mv a0, a1 1919; RV32I-NEXT: .LBB32_4: # %start 1920; RV32I-NEXT: andi a0, a0, 255 1921; RV32I-NEXT: lw ra, 28(sp) # 4-byte Folded Reload 1922; RV32I-NEXT: lw s0, 24(sp) # 4-byte Folded Reload 1923; RV32I-NEXT: lw s1, 20(sp) # 4-byte Folded Reload 1924; RV32I-NEXT: lw s2, 16(sp) # 4-byte Folded Reload 1925; RV32I-NEXT: lw s3, 12(sp) # 4-byte Folded Reload 1926; RV32I-NEXT: addi sp, sp, 32 1927; RV32I-NEXT: ret 1928; 1929; RV64I-LABEL: fcvt_wu_s_sat_i8: 1930; RV64I: # %bb.0: # %start 1931; RV64I-NEXT: addi sp, sp, -32 1932; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill 1933; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill 1934; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill 1935; RV64I-NEXT: sd s2, 0(sp) # 8-byte Folded Spill 1936; RV64I-NEXT: mv s0, a0 1937; RV64I-NEXT: li a1, 0 1938; RV64I-NEXT: call __gedf2@plt 1939; RV64I-NEXT: mv s1, a0 1940; RV64I-NEXT: mv a0, s0 1941; RV64I-NEXT: call __fixunsdfdi@plt 1942; RV64I-NEXT: li s2, 0 1943; RV64I-NEXT: bltz s1, .LBB32_2 1944; RV64I-NEXT: # %bb.1: # %start 1945; RV64I-NEXT: mv s2, a0 1946; RV64I-NEXT: .LBB32_2: # %start 1947; RV64I-NEXT: lui a0, 131967 1948; RV64I-NEXT: slli a1, a0, 33 1949; RV64I-NEXT: mv a0, s0 1950; RV64I-NEXT: call __gtdf2@plt 1951; RV64I-NEXT: li a1, 255 1952; RV64I-NEXT: bgtz a0, .LBB32_4 1953; RV64I-NEXT: # %bb.3: # %start 1954; RV64I-NEXT: mv a1, s2 1955; RV64I-NEXT: .LBB32_4: # %start 1956; RV64I-NEXT: andi a0, a1, 255 1957; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload 1958; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload 1959; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload 1960; RV64I-NEXT: ld s2, 0(sp) # 8-byte Folded Reload 1961; RV64I-NEXT: addi sp, sp, 32 1962; RV64I-NEXT: ret 1963start: 1964 %0 = tail call i8 @llvm.fptoui.sat.i8.f64(double %a) 1965 ret i8 %0 1966} 1967declare i8 @llvm.fptoui.sat.i8.f64(double) 1968