1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 2; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \ 3; RUN: -disable-strictnode-mutation -target-abi=ilp32f \ 4; RUN: | FileCheck -check-prefixes=CHECKIF,RV32IF %s 5; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \ 6; RUN: -disable-strictnode-mutation -target-abi=lp64f \ 7; RUN: | FileCheck -check-prefixes=CHECKIF,RV64IF %s 8; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \ 9; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV32I %s 10; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \ 11; RUN: -disable-strictnode-mutation | FileCheck -check-prefix=RV64I %s 12 13; NOTE: The rounding mode metadata does not effect which instruction is 14; selected. Dynamic rounding mode is always used for operations that 15; support rounding mode. 16 17define i32 @fcvt_w_s(float %a) nounwind strictfp { 18; CHECKIF-LABEL: fcvt_w_s: 19; CHECKIF: # %bb.0: 20; CHECKIF-NEXT: fcvt.w.s a0, fa0, rtz 21; CHECKIF-NEXT: ret 22; 23; RV32I-LABEL: fcvt_w_s: 24; RV32I: # %bb.0: 25; RV32I-NEXT: addi sp, sp, -16 26; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 27; RV32I-NEXT: call __fixsfsi@plt 28; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 29; RV32I-NEXT: addi sp, sp, 16 30; RV32I-NEXT: ret 31; 32; RV64I-LABEL: fcvt_w_s: 33; RV64I: # %bb.0: 34; RV64I-NEXT: addi sp, sp, -16 35; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 36; RV64I-NEXT: call __fixsfsi@plt 37; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 38; RV64I-NEXT: addi sp, sp, 16 39; RV64I-NEXT: ret 40 %1 = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict") strictfp 41 ret i32 %1 42} 43declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata) 44 45define i32 @fcvt_wu_s(float %a) nounwind strictfp { 46; CHECKIF-LABEL: fcvt_wu_s: 47; CHECKIF: # %bb.0: 48; CHECKIF-NEXT: fcvt.wu.s a0, fa0, rtz 49; CHECKIF-NEXT: ret 50; 51; RV32I-LABEL: fcvt_wu_s: 52; RV32I: # %bb.0: 53; RV32I-NEXT: addi sp, sp, -16 54; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 55; RV32I-NEXT: call __fixunssfsi@plt 56; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 57; RV32I-NEXT: addi sp, sp, 16 58; RV32I-NEXT: ret 59; 60; RV64I-LABEL: fcvt_wu_s: 61; RV64I: # %bb.0: 62; RV64I-NEXT: addi sp, sp, -16 63; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 64; RV64I-NEXT: call __fixunssfsi@plt 65; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 66; RV64I-NEXT: addi sp, sp, 16 67; RV64I-NEXT: ret 68 %1 = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict") strictfp 69 ret i32 %1 70} 71declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata) 72 73; Test where the fptoui has multiple uses, one of which causes a sext to be 74; inserted on RV64. 75define i32 @fcvt_wu_s_multiple_use(float %x, i32* %y) nounwind { 76; CHECKIF-LABEL: fcvt_wu_s_multiple_use: 77; CHECKIF: # %bb.0: 78; CHECKIF-NEXT: fcvt.wu.s a1, fa0, rtz 79; CHECKIF-NEXT: li a0, 1 80; CHECKIF-NEXT: beqz a1, .LBB2_2 81; CHECKIF-NEXT: # %bb.1: 82; CHECKIF-NEXT: mv a0, a1 83; CHECKIF-NEXT: .LBB2_2: 84; CHECKIF-NEXT: ret 85; 86; RV32I-LABEL: fcvt_wu_s_multiple_use: 87; RV32I: # %bb.0: 88; RV32I-NEXT: addi sp, sp, -16 89; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 90; RV32I-NEXT: call __fixunssfsi@plt 91; RV32I-NEXT: mv a1, a0 92; RV32I-NEXT: li a0, 1 93; RV32I-NEXT: beqz a1, .LBB2_2 94; RV32I-NEXT: # %bb.1: 95; RV32I-NEXT: mv a0, a1 96; RV32I-NEXT: .LBB2_2: 97; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 98; RV32I-NEXT: addi sp, sp, 16 99; RV32I-NEXT: ret 100; 101; RV64I-LABEL: fcvt_wu_s_multiple_use: 102; RV64I: # %bb.0: 103; RV64I-NEXT: addi sp, sp, -16 104; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 105; RV64I-NEXT: call __fixunssfsi@plt 106; RV64I-NEXT: mv a1, a0 107; RV64I-NEXT: li a0, 1 108; RV64I-NEXT: beqz a1, .LBB2_2 109; RV64I-NEXT: # %bb.1: 110; RV64I-NEXT: mv a0, a1 111; RV64I-NEXT: .LBB2_2: 112; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 113; RV64I-NEXT: addi sp, sp, 16 114; RV64I-NEXT: ret 115 %a = call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %x, metadata !"fpexcept.strict") strictfp 116 %b = icmp eq i32 %a, 0 117 %c = select i1 %b, i32 1, i32 %a 118 ret i32 %c 119} 120 121define float @fcvt_s_w(i32 %a) nounwind strictfp { 122; CHECKIF-LABEL: fcvt_s_w: 123; CHECKIF: # %bb.0: 124; CHECKIF-NEXT: fcvt.s.w fa0, a0 125; CHECKIF-NEXT: ret 126; 127; RV32I-LABEL: fcvt_s_w: 128; RV32I: # %bb.0: 129; RV32I-NEXT: addi sp, sp, -16 130; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 131; RV32I-NEXT: call __floatsisf@plt 132; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 133; RV32I-NEXT: addi sp, sp, 16 134; RV32I-NEXT: ret 135; 136; RV64I-LABEL: fcvt_s_w: 137; RV64I: # %bb.0: 138; RV64I-NEXT: addi sp, sp, -16 139; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 140; RV64I-NEXT: sext.w a0, a0 141; RV64I-NEXT: call __floatsisf@plt 142; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 143; RV64I-NEXT: addi sp, sp, 16 144; RV64I-NEXT: ret 145 %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp 146 ret float %1 147} 148declare float @llvm.experimental.constrained.sitofp.f32.i32(i32, metadata, metadata) 149 150define float @fcvt_s_w_load(i32* %p) nounwind strictfp { 151; CHECKIF-LABEL: fcvt_s_w_load: 152; CHECKIF: # %bb.0: 153; CHECKIF-NEXT: lw a0, 0(a0) 154; CHECKIF-NEXT: fcvt.s.w fa0, a0 155; CHECKIF-NEXT: ret 156; 157; RV32I-LABEL: fcvt_s_w_load: 158; RV32I: # %bb.0: 159; RV32I-NEXT: addi sp, sp, -16 160; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 161; RV32I-NEXT: lw a0, 0(a0) 162; RV32I-NEXT: call __floatsisf@plt 163; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 164; RV32I-NEXT: addi sp, sp, 16 165; RV32I-NEXT: ret 166; 167; RV64I-LABEL: fcvt_s_w_load: 168; RV64I: # %bb.0: 169; RV64I-NEXT: addi sp, sp, -16 170; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 171; RV64I-NEXT: lw a0, 0(a0) 172; RV64I-NEXT: call __floatsisf@plt 173; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 174; RV64I-NEXT: addi sp, sp, 16 175; RV64I-NEXT: ret 176 %a = load i32, i32* %p 177 %1 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp 178 ret float %1 179} 180 181define float @fcvt_s_wu(i32 %a) nounwind strictfp { 182; CHECKIF-LABEL: fcvt_s_wu: 183; CHECKIF: # %bb.0: 184; CHECKIF-NEXT: fcvt.s.wu fa0, a0 185; CHECKIF-NEXT: ret 186; 187; RV32I-LABEL: fcvt_s_wu: 188; RV32I: # %bb.0: 189; RV32I-NEXT: addi sp, sp, -16 190; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 191; RV32I-NEXT: call __floatunsisf@plt 192; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 193; RV32I-NEXT: addi sp, sp, 16 194; RV32I-NEXT: ret 195; 196; RV64I-LABEL: fcvt_s_wu: 197; RV64I: # %bb.0: 198; RV64I-NEXT: addi sp, sp, -16 199; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 200; RV64I-NEXT: sext.w a0, a0 201; RV64I-NEXT: call __floatunsisf@plt 202; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 203; RV64I-NEXT: addi sp, sp, 16 204; RV64I-NEXT: ret 205 %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp 206 ret float %1 207} 208declare float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata, metadata) 209 210define float @fcvt_s_wu_load(i32* %p) nounwind strictfp { 211; RV32IF-LABEL: fcvt_s_wu_load: 212; RV32IF: # %bb.0: 213; RV32IF-NEXT: lw a0, 0(a0) 214; RV32IF-NEXT: fcvt.s.wu fa0, a0 215; RV32IF-NEXT: ret 216; 217; RV64IF-LABEL: fcvt_s_wu_load: 218; RV64IF: # %bb.0: 219; RV64IF-NEXT: lwu a0, 0(a0) 220; RV64IF-NEXT: fcvt.s.wu fa0, a0 221; RV64IF-NEXT: ret 222; 223; RV32I-LABEL: fcvt_s_wu_load: 224; RV32I: # %bb.0: 225; RV32I-NEXT: addi sp, sp, -16 226; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 227; RV32I-NEXT: lw a0, 0(a0) 228; RV32I-NEXT: call __floatunsisf@plt 229; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 230; RV32I-NEXT: addi sp, sp, 16 231; RV32I-NEXT: ret 232; 233; RV64I-LABEL: fcvt_s_wu_load: 234; RV64I: # %bb.0: 235; RV64I-NEXT: addi sp, sp, -16 236; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 237; RV64I-NEXT: lw a0, 0(a0) 238; RV64I-NEXT: call __floatunsisf@plt 239; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 240; RV64I-NEXT: addi sp, sp, 16 241; RV64I-NEXT: ret 242 %a = load i32, i32* %p 243 %1 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp 244 ret float %1 245} 246 247define i64 @fcvt_l_s(float %a) nounwind strictfp { 248; RV32IF-LABEL: fcvt_l_s: 249; RV32IF: # %bb.0: 250; RV32IF-NEXT: addi sp, sp, -16 251; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 252; RV32IF-NEXT: call __fixsfdi@plt 253; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 254; RV32IF-NEXT: addi sp, sp, 16 255; RV32IF-NEXT: ret 256; 257; RV64IF-LABEL: fcvt_l_s: 258; RV64IF: # %bb.0: 259; RV64IF-NEXT: fcvt.l.s a0, fa0, rtz 260; RV64IF-NEXT: ret 261; 262; RV32I-LABEL: fcvt_l_s: 263; RV32I: # %bb.0: 264; RV32I-NEXT: addi sp, sp, -16 265; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 266; RV32I-NEXT: call __fixsfdi@plt 267; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 268; RV32I-NEXT: addi sp, sp, 16 269; RV32I-NEXT: ret 270; 271; RV64I-LABEL: fcvt_l_s: 272; RV64I: # %bb.0: 273; RV64I-NEXT: addi sp, sp, -16 274; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 275; RV64I-NEXT: call __fixsfdi@plt 276; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 277; RV64I-NEXT: addi sp, sp, 16 278; RV64I-NEXT: ret 279 %1 = call i64 @llvm.experimental.constrained.fptosi.i64.f32(float %a, metadata !"fpexcept.strict") strictfp 280 ret i64 %1 281} 282declare i64 @llvm.experimental.constrained.fptosi.i64.f32(float, metadata) 283 284define i64 @fcvt_lu_s(float %a) nounwind strictfp { 285; RV32IF-LABEL: fcvt_lu_s: 286; RV32IF: # %bb.0: 287; RV32IF-NEXT: addi sp, sp, -16 288; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 289; RV32IF-NEXT: call __fixunssfdi@plt 290; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 291; RV32IF-NEXT: addi sp, sp, 16 292; RV32IF-NEXT: ret 293; 294; RV64IF-LABEL: fcvt_lu_s: 295; RV64IF: # %bb.0: 296; RV64IF-NEXT: fcvt.lu.s a0, fa0, rtz 297; RV64IF-NEXT: ret 298; 299; RV32I-LABEL: fcvt_lu_s: 300; RV32I: # %bb.0: 301; RV32I-NEXT: addi sp, sp, -16 302; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 303; RV32I-NEXT: call __fixunssfdi@plt 304; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 305; RV32I-NEXT: addi sp, sp, 16 306; RV32I-NEXT: ret 307; 308; RV64I-LABEL: fcvt_lu_s: 309; RV64I: # %bb.0: 310; RV64I-NEXT: addi sp, sp, -16 311; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 312; RV64I-NEXT: call __fixunssfdi@plt 313; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 314; RV64I-NEXT: addi sp, sp, 16 315; RV64I-NEXT: ret 316 %1 = call i64 @llvm.experimental.constrained.fptoui.i64.f32(float %a, metadata !"fpexcept.strict") strictfp 317 ret i64 %1 318} 319declare i64 @llvm.experimental.constrained.fptoui.i64.f32(float, metadata) 320 321define float @fcvt_s_l(i64 %a) nounwind strictfp { 322; RV32IF-LABEL: fcvt_s_l: 323; RV32IF: # %bb.0: 324; RV32IF-NEXT: addi sp, sp, -16 325; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 326; RV32IF-NEXT: call __floatdisf@plt 327; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 328; RV32IF-NEXT: addi sp, sp, 16 329; RV32IF-NEXT: ret 330; 331; RV64IF-LABEL: fcvt_s_l: 332; RV64IF: # %bb.0: 333; RV64IF-NEXT: fcvt.s.l fa0, a0 334; RV64IF-NEXT: ret 335; 336; RV32I-LABEL: fcvt_s_l: 337; RV32I: # %bb.0: 338; RV32I-NEXT: addi sp, sp, -16 339; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 340; RV32I-NEXT: call __floatdisf@plt 341; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 342; RV32I-NEXT: addi sp, sp, 16 343; RV32I-NEXT: ret 344; 345; RV64I-LABEL: fcvt_s_l: 346; RV64I: # %bb.0: 347; RV64I-NEXT: addi sp, sp, -16 348; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 349; RV64I-NEXT: call __floatdisf@plt 350; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 351; RV64I-NEXT: addi sp, sp, 16 352; RV64I-NEXT: ret 353 %1 = call float @llvm.experimental.constrained.sitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp 354 ret float %1 355} 356declare float @llvm.experimental.constrained.sitofp.f32.i64(i64, metadata, metadata) 357 358define float @fcvt_s_lu(i64 %a) nounwind strictfp { 359; RV32IF-LABEL: fcvt_s_lu: 360; RV32IF: # %bb.0: 361; RV32IF-NEXT: addi sp, sp, -16 362; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 363; RV32IF-NEXT: call __floatundisf@plt 364; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 365; RV32IF-NEXT: addi sp, sp, 16 366; RV32IF-NEXT: ret 367; 368; RV64IF-LABEL: fcvt_s_lu: 369; RV64IF: # %bb.0: 370; RV64IF-NEXT: fcvt.s.lu fa0, a0 371; RV64IF-NEXT: ret 372; 373; RV32I-LABEL: fcvt_s_lu: 374; RV32I: # %bb.0: 375; RV32I-NEXT: addi sp, sp, -16 376; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 377; RV32I-NEXT: call __floatundisf@plt 378; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 379; RV32I-NEXT: addi sp, sp, 16 380; RV32I-NEXT: ret 381; 382; RV64I-LABEL: fcvt_s_lu: 383; RV64I: # %bb.0: 384; RV64I-NEXT: addi sp, sp, -16 385; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 386; RV64I-NEXT: call __floatundisf@plt 387; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 388; RV64I-NEXT: addi sp, sp, 16 389; RV64I-NEXT: ret 390 %1 = call float @llvm.experimental.constrained.uitofp.f32.i64(i64 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp 391 ret float %1 392} 393declare float @llvm.experimental.constrained.uitofp.f32.i64(i64, metadata, metadata) 394 395define float @fcvt_s_w_i8(i8 signext %a) nounwind strictfp { 396; CHECKIF-LABEL: fcvt_s_w_i8: 397; CHECKIF: # %bb.0: 398; CHECKIF-NEXT: fcvt.s.w fa0, a0 399; CHECKIF-NEXT: ret 400; 401; RV32I-LABEL: fcvt_s_w_i8: 402; RV32I: # %bb.0: 403; RV32I-NEXT: addi sp, sp, -16 404; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 405; RV32I-NEXT: call __floatsisf@plt 406; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 407; RV32I-NEXT: addi sp, sp, 16 408; RV32I-NEXT: ret 409; 410; RV64I-LABEL: fcvt_s_w_i8: 411; RV64I: # %bb.0: 412; RV64I-NEXT: addi sp, sp, -16 413; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 414; RV64I-NEXT: call __floatsisf@plt 415; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 416; RV64I-NEXT: addi sp, sp, 16 417; RV64I-NEXT: ret 418 %1 = call float @llvm.experimental.constrained.sitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp 419 ret float %1 420} 421declare float @llvm.experimental.constrained.sitofp.f32.i8(i8, metadata, metadata) 422 423define float @fcvt_s_wu_i8(i8 zeroext %a) nounwind strictfp { 424; CHECKIF-LABEL: fcvt_s_wu_i8: 425; CHECKIF: # %bb.0: 426; CHECKIF-NEXT: fcvt.s.wu fa0, a0 427; CHECKIF-NEXT: ret 428; 429; RV32I-LABEL: fcvt_s_wu_i8: 430; RV32I: # %bb.0: 431; RV32I-NEXT: addi sp, sp, -16 432; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 433; RV32I-NEXT: call __floatunsisf@plt 434; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 435; RV32I-NEXT: addi sp, sp, 16 436; RV32I-NEXT: ret 437; 438; RV64I-LABEL: fcvt_s_wu_i8: 439; RV64I: # %bb.0: 440; RV64I-NEXT: addi sp, sp, -16 441; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 442; RV64I-NEXT: call __floatunsisf@plt 443; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 444; RV64I-NEXT: addi sp, sp, 16 445; RV64I-NEXT: ret 446 %1 = call float @llvm.experimental.constrained.uitofp.f32.i8(i8 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") 447 ret float %1 448} 449declare float @llvm.experimental.constrained.uitofp.f32.i8(i8, metadata, metadata) 450 451define float @fcvt_s_w_i16(i16 signext %a) nounwind strictfp { 452; CHECKIF-LABEL: fcvt_s_w_i16: 453; CHECKIF: # %bb.0: 454; CHECKIF-NEXT: fcvt.s.w fa0, a0 455; CHECKIF-NEXT: ret 456; 457; RV32I-LABEL: fcvt_s_w_i16: 458; RV32I: # %bb.0: 459; RV32I-NEXT: addi sp, sp, -16 460; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 461; RV32I-NEXT: call __floatsisf@plt 462; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 463; RV32I-NEXT: addi sp, sp, 16 464; RV32I-NEXT: ret 465; 466; RV64I-LABEL: fcvt_s_w_i16: 467; RV64I: # %bb.0: 468; RV64I-NEXT: addi sp, sp, -16 469; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 470; RV64I-NEXT: call __floatsisf@plt 471; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 472; RV64I-NEXT: addi sp, sp, 16 473; RV64I-NEXT: ret 474 %1 = call float @llvm.experimental.constrained.sitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp 475 ret float %1 476} 477declare float @llvm.experimental.constrained.sitofp.f32.i16(i16, metadata, metadata) 478 479define float @fcvt_s_wu_i16(i16 zeroext %a) nounwind strictfp { 480; CHECKIF-LABEL: fcvt_s_wu_i16: 481; CHECKIF: # %bb.0: 482; CHECKIF-NEXT: fcvt.s.wu fa0, a0 483; CHECKIF-NEXT: ret 484; 485; RV32I-LABEL: fcvt_s_wu_i16: 486; RV32I: # %bb.0: 487; RV32I-NEXT: addi sp, sp, -16 488; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 489; RV32I-NEXT: call __floatunsisf@plt 490; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 491; RV32I-NEXT: addi sp, sp, 16 492; RV32I-NEXT: ret 493; 494; RV64I-LABEL: fcvt_s_wu_i16: 495; RV64I: # %bb.0: 496; RV64I-NEXT: addi sp, sp, -16 497; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 498; RV64I-NEXT: call __floatunsisf@plt 499; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 500; RV64I-NEXT: addi sp, sp, 16 501; RV64I-NEXT: ret 502 %1 = call float @llvm.experimental.constrained.uitofp.f32.i16(i16 %a, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp 503 ret float %1 504} 505declare float @llvm.experimental.constrained.uitofp.f32.i16(i16, metadata, metadata) 506 507; Make sure we select W version of addi on RV64. 508define signext i32 @fcvt_s_w_demanded_bits(i32 signext %0, float* %1) nounwind { 509; RV32IF-LABEL: fcvt_s_w_demanded_bits: 510; RV32IF: # %bb.0: 511; RV32IF-NEXT: addi a0, a0, 1 512; RV32IF-NEXT: fcvt.s.w ft0, a0 513; RV32IF-NEXT: fsw ft0, 0(a1) 514; RV32IF-NEXT: ret 515; 516; RV64IF-LABEL: fcvt_s_w_demanded_bits: 517; RV64IF: # %bb.0: 518; RV64IF-NEXT: addiw a0, a0, 1 519; RV64IF-NEXT: fcvt.s.w ft0, a0 520; RV64IF-NEXT: fsw ft0, 0(a1) 521; RV64IF-NEXT: ret 522; 523; RV32I-LABEL: fcvt_s_w_demanded_bits: 524; RV32I: # %bb.0: 525; RV32I-NEXT: addi sp, sp, -16 526; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 527; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill 528; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill 529; RV32I-NEXT: mv s0, a1 530; RV32I-NEXT: addi s1, a0, 1 531; RV32I-NEXT: mv a0, s1 532; RV32I-NEXT: call __floatsisf@plt 533; RV32I-NEXT: sw a0, 0(s0) 534; RV32I-NEXT: mv a0, s1 535; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 536; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload 537; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload 538; RV32I-NEXT: addi sp, sp, 16 539; RV32I-NEXT: ret 540; 541; RV64I-LABEL: fcvt_s_w_demanded_bits: 542; RV64I: # %bb.0: 543; RV64I-NEXT: addi sp, sp, -32 544; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill 545; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill 546; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill 547; RV64I-NEXT: mv s0, a1 548; RV64I-NEXT: addiw s1, a0, 1 549; RV64I-NEXT: mv a0, s1 550; RV64I-NEXT: call __floatsisf@plt 551; RV64I-NEXT: sw a0, 0(s0) 552; RV64I-NEXT: mv a0, s1 553; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload 554; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload 555; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload 556; RV64I-NEXT: addi sp, sp, 32 557; RV64I-NEXT: ret 558 %3 = add i32 %0, 1 559 %4 = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp 560 store float %4, float* %1, align 4 561 ret i32 %3 562} 563 564; Make sure we select W version of addi on RV64. 565define signext i32 @fcvt_s_wu_demanded_bits(i32 signext %0, float* %1) nounwind { 566; RV32IF-LABEL: fcvt_s_wu_demanded_bits: 567; RV32IF: # %bb.0: 568; RV32IF-NEXT: addi a0, a0, 1 569; RV32IF-NEXT: fcvt.s.wu ft0, a0 570; RV32IF-NEXT: fsw ft0, 0(a1) 571; RV32IF-NEXT: ret 572; 573; RV64IF-LABEL: fcvt_s_wu_demanded_bits: 574; RV64IF: # %bb.0: 575; RV64IF-NEXT: addiw a0, a0, 1 576; RV64IF-NEXT: fcvt.s.wu ft0, a0 577; RV64IF-NEXT: fsw ft0, 0(a1) 578; RV64IF-NEXT: ret 579; 580; RV32I-LABEL: fcvt_s_wu_demanded_bits: 581; RV32I: # %bb.0: 582; RV32I-NEXT: addi sp, sp, -16 583; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill 584; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill 585; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill 586; RV32I-NEXT: mv s0, a1 587; RV32I-NEXT: addi s1, a0, 1 588; RV32I-NEXT: mv a0, s1 589; RV32I-NEXT: call __floatunsisf@plt 590; RV32I-NEXT: sw a0, 0(s0) 591; RV32I-NEXT: mv a0, s1 592; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload 593; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload 594; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload 595; RV32I-NEXT: addi sp, sp, 16 596; RV32I-NEXT: ret 597; 598; RV64I-LABEL: fcvt_s_wu_demanded_bits: 599; RV64I: # %bb.0: 600; RV64I-NEXT: addi sp, sp, -32 601; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill 602; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill 603; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill 604; RV64I-NEXT: mv s0, a1 605; RV64I-NEXT: addiw s1, a0, 1 606; RV64I-NEXT: mv a0, s1 607; RV64I-NEXT: call __floatunsisf@plt 608; RV64I-NEXT: sw a0, 0(s0) 609; RV64I-NEXT: mv a0, s1 610; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload 611; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload 612; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload 613; RV64I-NEXT: addi sp, sp, 32 614; RV64I-NEXT: ret 615 %3 = add i32 %0, 1 616 %4 = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %3, metadata !"round.dynamic", metadata !"fpexcept.strict") strictfp 617 store float %4, float* %1, align 4 618 ret i32 %3 619} 620