; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IM %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+m -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IM %s

; Tests lowering of integer multiplication (variable operands, high-half
; multiplies, powers of two, and other constant multiplicands) on RV32/RV64,
; both with and without the M extension. The CHECK lines below are
; autogenerated; regenerate them with utils/update_llc_test_checks.py rather
; than editing them by hand.

define signext i32 @square(i32 %a) nounwind {
; RV32I-LABEL: square:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a1, a0
; RV32I-NEXT:    call __mulsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: square:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    mul a0, a0, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: square:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv a1, a0
; RV64I-NEXT:    call __muldi3@plt
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: square:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    mulw a0, a0, a0
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, %a
  ret i32 %1
}

define signext i32 @mul(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: mul:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __mulsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: mul:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: mul:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __muldi3@plt
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: mul:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    mulw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, %b
  ret i32 %1
}

define signext i32 @mul_constant(i32 %a) nounwind {
; RV32I-LABEL: mul_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 2
; RV32I-NEXT:    add a0, a1, a0
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: mul_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a0, 2
; RV32IM-NEXT:    add a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: mul_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slliw a1, a0, 2
; RV64I-NEXT:    addw a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: mul_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slliw a1, a0, 2
; RV64IM-NEXT:    addw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, 5
  ret i32 %1
}

define i32 @mul_pow2(i32 %a) nounwind {
; RV32I-LABEL: mul_pow2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a0, a0, 3
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: mul_pow2:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a0, a0, 3
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: mul_pow2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slliw a0, a0, 3
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: mul_pow2:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slliw a0, a0, 3
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, 8
  ret i32 %1
}

define i64 @mul64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: mul64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __muldi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: mul64:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    mul a3, a0, a3
; RV32IM-NEXT:    mulhu a4, a0, a2
; RV32IM-NEXT:    add a3, a4, a3
; RV32IM-NEXT:    mul a1, a1, a2
; RV32IM-NEXT:    add a1, a3, a1
; RV32IM-NEXT:    mul a0, a0, a2
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: mul64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __muldi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: mul64:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i64 %a, %b
  ret i64 %1
}

define i64 @mul64_constant(i64 %a) nounwind {
; RV32I-LABEL: mul64_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a3, a0, 2
; RV32I-NEXT:    add a2, a3, a0
; RV32I-NEXT:    sltu a3, a2, a3
; RV32I-NEXT:    srli a0, a0, 30
; RV32I-NEXT:    slli a4, a1, 2
; RV32I-NEXT:    or a0, a4, a0
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    add a1, a0, a3
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: mul64_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a2, 5
; RV32IM-NEXT:    mulhu a2, a0, a2
; RV32IM-NEXT:    slli a3, a1, 2
; RV32IM-NEXT:    add a1, a3, a1
; RV32IM-NEXT:    add a1, a2, a1
; RV32IM-NEXT:    slli a2, a0, 2
; RV32IM-NEXT:    add a0, a2, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: mul64_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 2
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: mul64_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a0, 2
; RV64IM-NEXT:    add a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = mul i64 %a, 5
  ret i64 %1
}

define i32 @mulhs(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: mulhs:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    srai a3, a2, 31
; RV32I-NEXT:    call __muldi3@plt
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: mulhs:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    mulh a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: mulhs:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __muldi3@plt
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: mulhs:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    sext.w a1, a1
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 32
; RV64IM-NEXT:    ret
  %1 = sext i32 %a to i64
  %2 = sext i32 %b to i64
  %3 = mul i64 %1, %2
  %4 = lshr i64 %3, 32
  %5 = trunc i64 %4 to i32
  ret i32 %5
}

define i32 @mulhs_positive_constant(i32 %a) nounwind {
; RV32I-LABEL: mulhs_positive_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    slli a2, a0, 2
; RV32I-NEXT:    add a3, a2, a0
; RV32I-NEXT:    sltu a2, a3, a2
; RV32I-NEXT:    srli a0, a0, 30
; RV32I-NEXT:    slli a3, a1, 2
; RV32I-NEXT:    or a0, a3, a0
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    add a0, a0, a2
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: mulhs_positive_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, 5
; RV32IM-NEXT:    mulh a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: mulhs_positive_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    slli a1, a0, 2
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: mulhs_positive_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    slli a1, a0, 2
; RV64IM-NEXT:    add a0, a1, a0
; RV64IM-NEXT:    srli a0, a0, 32
; RV64IM-NEXT:    ret
  %1 = sext i32 %a to i64
  %2 = mul i64 %1, 5
  %3 = lshr i64 %2, 32
  %4 = trunc i64 %3 to i32
  ret i32 %4
}

define i32 @mulhs_negative_constant(i32 %a) nounwind {
; RV32I-LABEL: mulhs_negative_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srai a1, a0, 31
; RV32I-NEXT:    slli a2, a0, 2
; RV32I-NEXT:    add a3, a2, a0
; RV32I-NEXT:    sltu a2, a3, a2
; RV32I-NEXT:    srli a0, a0, 30
; RV32I-NEXT:    slli a4, a1, 2
; RV32I-NEXT:    or a0, a4, a0
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    add a0, a0, a2
; RV32I-NEXT:    snez a1, a3
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    neg a0, a0
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: mulhs_negative_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, -5
; RV32IM-NEXT:    mulh a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: mulhs_negative_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    slli a1, a0, 2
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    neg a0, a0
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: mulhs_negative_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    sext.w a0, a0
; RV64IM-NEXT:    slli a1, a0, 2
; RV64IM-NEXT:    add a0, a1, a0
; RV64IM-NEXT:    neg a0, a0
; RV64IM-NEXT:    srli a0, a0, 32
; RV64IM-NEXT:    ret
  %1 = sext i32 %a to i64
  %2 = mul i64 %1, -5
  %3 = lshr i64 %2, 32
  %4 = trunc i64 %3 to i32
  ret i32 %4
}

define zeroext i32 @mulhu(i32 zeroext %a, i32 zeroext %b) nounwind {
; RV32I-LABEL: mulhu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    li a3, 0
; RV32I-NEXT:    call __muldi3@plt
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: mulhu:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    mulhu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: mulhu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __muldi3@plt
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: mulhu:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 32
; RV64IM-NEXT:    ret
  %1 = zext i32 %a to i64
  %2 = zext i32 %b to i64
  %3 = mul i64 %1, %2
  %4 = lshr i64 %3, 32
  %5 = trunc i64 %4 to i32
  ret i32 %5
}

define i32 @mulhsu(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: mulhsu:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv a2, a1
; RV32I-NEXT:    srai a3, a1, 31
; RV32I-NEXT:    li a1, 0
; RV32I-NEXT:    call __muldi3@plt
; RV32I-NEXT:    mv a0, a1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: mulhsu:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    mulhsu a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: mulhsu:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    call __muldi3@plt
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: mulhsu:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 32
; RV64IM-NEXT:    srli a0, a0, 32
; RV64IM-NEXT:    sext.w a1, a1
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 32
; RV64IM-NEXT:    ret
  %1 = zext i32 %a to i64
  %2 = sext i32 %b to i64
  %3 = mul i64 %1, %2
  %4 = lshr i64 %3, 32
  %5 = trunc i64 %4 to i32
  ret i32 %5
}

define i32 @mulhu_constant(i32 %a) nounwind {
; RV32I-LABEL: mulhu_constant:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 2
; RV32I-NEXT:    add a2, a1, a0
; RV32I-NEXT:    sltu a1, a2, a1
; RV32I-NEXT:    srli a0, a0, 30
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: mulhu_constant:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, 5
; RV32IM-NEXT:    mulhu a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: mulhu_constant:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a0, a0, 32
; RV64I-NEXT:    srli a1, a0, 32
; RV64I-NEXT:    srli a0, a0, 30
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    srli a0, a0, 32
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: mulhu_constant:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a0, a0, 32
; RV64IM-NEXT:    srli a1, a0, 32
; RV64IM-NEXT:    srli a0, a0, 30
; RV64IM-NEXT:    add a0, a0, a1
; RV64IM-NEXT:    srli a0, a0, 32
; RV64IM-NEXT:    ret
  %1 = zext i32 %a to i64
  %2 = mul i64 %1, 5
  %3 = lshr i64 %2, 32
  %4 = trunc i64 %3 to i32
  ret i32 %4
}

define i32 @muli32_p65(i32 %a) nounwind {
; RV32I-LABEL: muli32_p65:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 6
; RV32I-NEXT:    add a0, a1, a0
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli32_p65:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a0, 6
; RV32IM-NEXT:    add a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli32_p65:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slliw a1, a0, 6
; RV64I-NEXT:    addw a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli32_p65:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slliw a1, a0, 6
; RV64IM-NEXT:    addw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, 65
  ret i32 %1
}

define i32 @muli32_p63(i32 %a) nounwind {
; RV32I-LABEL: muli32_p63:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 6
; RV32I-NEXT:    sub a0, a1, a0
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli32_p63:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a0, 6
; RV32IM-NEXT:    sub a0, a1, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli32_p63:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slliw a1, a0, 6
; RV64I-NEXT:    subw a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli32_p63:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slliw a1, a0, 6
; RV64IM-NEXT:    subw a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, 63
  ret i32 %1
}

define i64 @muli64_p65(i64 %a) nounwind {
; RV32I-LABEL: muli64_p65:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a3, a0, 6
; RV32I-NEXT:    add a2, a3, a0
; RV32I-NEXT:    sltu a3, a2, a3
; RV32I-NEXT:    srli a0, a0, 26
; RV32I-NEXT:    slli a4, a1, 6
; RV32I-NEXT:    or a0, a4, a0
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    add a1, a0, a3
; RV32I-NEXT:    mv a0, a2
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli64_p65:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a2, 65
; RV32IM-NEXT:    mulhu a2, a0, a2
; RV32IM-NEXT:    slli a3, a1, 6
; RV32IM-NEXT:    add a1, a3, a1
; RV32IM-NEXT:    add a1, a2, a1
; RV32IM-NEXT:    slli a2, a0, 6
; RV32IM-NEXT:    add a0, a2, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli64_p65:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 6
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli64_p65:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a0, 6
; RV64IM-NEXT:    add a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = mul i64 %a, 65
  ret i64 %1
}

define i64 @muli64_p63(i64 %a) nounwind {
; RV32I-LABEL: muli64_p63:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a2, a0, 6
; RV32I-NEXT:    sltu a3, a2, a0
; RV32I-NEXT:    srli a4, a0, 26
; RV32I-NEXT:    slli a5, a1, 6
; RV32I-NEXT:    or a4, a5, a4
; RV32I-NEXT:    sub a1, a4, a1
; RV32I-NEXT:    sub a1, a1, a3
; RV32I-NEXT:    sub a0, a2, a0
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli64_p63:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a2, 63
; RV32IM-NEXT:    mulhu a2, a0, a2
; RV32IM-NEXT:    slli a3, a1, 6
; RV32IM-NEXT:    sub a1, a3, a1
; RV32IM-NEXT:    add a1, a2, a1
; RV32IM-NEXT:    slli a2, a0, 6
; RV32IM-NEXT:    sub a0, a2, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli64_p63:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 6
; RV64I-NEXT:    sub a0, a1, a0
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli64_p63:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a0, 6
; RV64IM-NEXT:    sub a0, a1, a0
; RV64IM-NEXT:    ret
  %1 = mul i64 %a, 63
  ret i64 %1
}

define i32 @muli32_m63(i32 %a) nounwind {
; RV32I-LABEL: muli32_m63:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 6
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli32_m63:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a0, 6
; RV32IM-NEXT:    sub a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli32_m63:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slliw a1, a0, 6
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli32_m63:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slliw a1, a0, 6
; RV64IM-NEXT:    subw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, -63
  ret i32 %1
}

define i32 @muli32_m65(i32 %a) nounwind {
; RV32I-LABEL: muli32_m65:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 6
; RV32I-NEXT:    add a0, a1, a0
; RV32I-NEXT:    neg a0, a0
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli32_m65:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a1, a0, 6
; RV32IM-NEXT:    add a0, a1, a0
; RV32IM-NEXT:    neg a0, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli32_m65:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slliw a1, a0, 6
; RV64I-NEXT:    addw a0, a1, a0
; RV64I-NEXT:    negw a0, a0
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli32_m65:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slliw a1, a0, 6
; RV64IM-NEXT:    addw a0, a1, a0
; RV64IM-NEXT:    negw a0, a0
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, -65
  ret i32 %1
}

define i64 @muli64_m63(i64 %a) nounwind {
; RV32I-LABEL: muli64_m63:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a2, a0, 6
; RV32I-NEXT:    sltu a3, a0, a2
; RV32I-NEXT:    srli a4, a0, 26
; RV32I-NEXT:    slli a5, a1, 6
; RV32I-NEXT:    or a4, a5, a4
; RV32I-NEXT:    sub a1, a1, a4
; RV32I-NEXT:    sub a1, a1, a3
; RV32I-NEXT:    sub a0, a0, a2
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli64_m63:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a2, a1, 6
; RV32IM-NEXT:    sub a1, a1, a2
; RV32IM-NEXT:    li a2, -63
; RV32IM-NEXT:    mulhu a2, a0, a2
; RV32IM-NEXT:    sub a2, a2, a0
; RV32IM-NEXT:    add a1, a2, a1
; RV32IM-NEXT:    slli a2, a0, 6
; RV32IM-NEXT:    sub a0, a0, a2
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli64_m63:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 6
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli64_m63:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a0, 6
; RV64IM-NEXT:    sub a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i64 %a, -63
  ret i64 %1
}

define i64 @muli64_m65(i64 %a) nounwind {
; RV32I-LABEL: muli64_m65:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a2, a0, 6
; RV32I-NEXT:    add a3, a2, a0
; RV32I-NEXT:    sltu a2, a3, a2
; RV32I-NEXT:    srli a0, a0, 26
; RV32I-NEXT:    slli a4, a1, 6
; RV32I-NEXT:    or a0, a4, a0
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    add a0, a0, a2
; RV32I-NEXT:    snez a1, a3
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    neg a1, a0
; RV32I-NEXT:    neg a0, a3
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli64_m65:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    slli a2, a1, 6
; RV32IM-NEXT:    add a1, a2, a1
; RV32IM-NEXT:    li a2, -65
; RV32IM-NEXT:    mulhu a2, a0, a2
; RV32IM-NEXT:    sub a2, a2, a0
; RV32IM-NEXT:    sub a1, a2, a1
; RV32IM-NEXT:    slli a2, a0, 6
; RV32IM-NEXT:    add a0, a2, a0
; RV32IM-NEXT:    neg a0, a0
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli64_m65:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 6
; RV64I-NEXT:    add a0, a1, a0
; RV64I-NEXT:    neg a0, a0
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli64_m65:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slli a1, a0, 6
; RV64IM-NEXT:    add a0, a1, a0
; RV64IM-NEXT:    neg a0, a0
; RV64IM-NEXT:    ret
  %1 = mul i64 %a, -65
  ret i64 %1
}

define i32 @muli32_p384(i32 %a) nounwind {
; RV32I-LABEL: muli32_p384:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    li a1, 384
; RV32I-NEXT:    call __mulsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli32_p384:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    li a1, 384
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli32_p384:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    li a1, 384
; RV64I-NEXT:    call __muldi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli32_p384:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    li a1, 384
; RV64IM-NEXT:    mulw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, 384
  ret i32 %1
}

define i32 @muli32_p12288(i32 %a) nounwind {
; RV32I-LABEL: muli32_p12288:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lui a1, 3
; RV32I-NEXT:    call __mulsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli32_p12288:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 3
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli32_p12288:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lui a1, 3
; RV64I-NEXT:    call __muldi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli32_p12288:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 3
; RV64IM-NEXT:    mulw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, 12288
  ret i32 %1
}

define i32 @muli32_p4352(i32 %a) nounwind {
; RV32I-LABEL: muli32_p4352:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 8
; RV32I-NEXT:    slli a0, a0, 12
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli32_p4352:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 1
; RV32IM-NEXT:    addi a1, a1, 256
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli32_p4352:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slliw a1, a0, 8
; RV64I-NEXT:    slliw a0, a0, 12
; RV64I-NEXT:    addw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli32_p4352:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slliw a1, a0, 8
; RV64IM-NEXT:    slliw a0, a0, 12
; RV64IM-NEXT:    addw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, 4352
  ret i32 %1
}

define i32 @muli32_p3840(i32 %a) nounwind {
; RV32I-LABEL: muli32_p3840:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 8
; RV32I-NEXT:    slli a0, a0, 12
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli32_p3840:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 1
; RV32IM-NEXT:    addi a1, a1, -256
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli32_p3840:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slliw a1, a0, 8
; RV64I-NEXT:    slliw a0, a0, 12
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli32_p3840:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slliw a1, a0, 8
; RV64IM-NEXT:    slliw a0, a0, 12
; RV64IM-NEXT:    subw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, 3840
  ret i32 %1
}

define i32 @muli32_m3840(i32 %a) nounwind {
; RV32I-LABEL: muli32_m3840:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slli a1, a0, 12
; RV32I-NEXT:    slli a0, a0, 8
; RV32I-NEXT:    sub a0, a0, a1
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli32_m3840:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 1048575
; RV32IM-NEXT:    addi a1, a1, 256
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli32_m3840:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slliw a1, a0, 12
; RV64I-NEXT:    slliw a0, a0, 8
; RV64I-NEXT:    subw a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli32_m3840:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    slliw a1, a0, 12
; RV64IM-NEXT:    slliw a0, a0, 8
; RV64IM-NEXT:    subw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, -3840
  ret i32 %1
}

define i32 @muli32_m4352(i32 %a) nounwind {
; RV32I-LABEL: muli32_m4352:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lui a1, 1048575
; RV32I-NEXT:    addi a1, a1, -256
; RV32I-NEXT:    call __mulsi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli32_m4352:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a1, 1048575
; RV32IM-NEXT:    addi a1, a1, -256
; RV32IM-NEXT:    mul a0, a0, a1
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli32_m4352:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lui a1, 1048575
; RV64I-NEXT:    addiw a1, a1, -256
; RV64I-NEXT:    call __muldi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli32_m4352:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 1048575
; RV64IM-NEXT:    addiw a1, a1, -256
; RV64IM-NEXT:    mulw a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i32 %a, -4352
  ret i32 %1
}

define i64 @muli64_p4352(i64 %a) nounwind {
; RV32I-LABEL: muli64_p4352:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srli a2, a0, 24
; RV32I-NEXT:    slli a3, a1, 8
; RV32I-NEXT:    or a2, a3, a2
; RV32I-NEXT:    srli a3, a0, 20
; RV32I-NEXT:    slli a1, a1, 12
; RV32I-NEXT:    or a1, a1, a3
; RV32I-NEXT:    add a1, a1, a2
; RV32I-NEXT:    slli a2, a0, 8
; RV32I-NEXT:    slli a3, a0, 12
; RV32I-NEXT:    add a0, a3, a2
; RV32I-NEXT:    sltu a2, a0, a3
; RV32I-NEXT:    add a1, a1, a2
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli64_p4352:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a2, 1
; RV32IM-NEXT:    addi a2, a2, 256
; RV32IM-NEXT:    mul a1, a1, a2
; RV32IM-NEXT:    mulhu a3, a0, a2
; RV32IM-NEXT:    add a1, a3, a1
; RV32IM-NEXT:    mul a0, a0, a2
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli64_p4352:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 8
; RV64I-NEXT:    slli a0, a0, 12
; RV64I-NEXT:    add a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli64_p4352:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 1
; RV64IM-NEXT:    addiw a1, a1, 256
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i64 %a, 4352
  ret i64 %1
}

define i64 @muli64_p3840(i64 %a) nounwind {
; RV32I-LABEL: muli64_p3840:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srli a2, a0, 24
; RV32I-NEXT:    slli a3, a1, 8
; RV32I-NEXT:    or a2, a3, a2
; RV32I-NEXT:    srli a3, a0, 20
; RV32I-NEXT:    slli a1, a1, 12
; RV32I-NEXT:    or a1, a1, a3
; RV32I-NEXT:    sub a1, a1, a2
; RV32I-NEXT:    slli a2, a0, 8
; RV32I-NEXT:    slli a0, a0, 12
; RV32I-NEXT:    sltu a3, a0, a2
; RV32I-NEXT:    sub a1, a1, a3
; RV32I-NEXT:    sub a0, a0, a2
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli64_p3840:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a2, 1
; RV32IM-NEXT:    addi a2, a2, -256
; RV32IM-NEXT:    mul a1, a1, a2
; RV32IM-NEXT:    mulhu a3, a0, a2
; RV32IM-NEXT:    add a1, a3, a1
; RV32IM-NEXT:    mul a0, a0, a2
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli64_p3840:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 8
; RV64I-NEXT:    slli a0, a0, 12
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli64_p3840:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 1
; RV64IM-NEXT:    addiw a1, a1, -256
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i64 %a, 3840
  ret i64 %1
}

define i64 @muli64_m4352(i64 %a) nounwind {
; RV32I-LABEL: muli64_m4352:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    lui a2, 1048575
; RV32I-NEXT:    addi a2, a2, -256
; RV32I-NEXT:    li a3, -1
; RV32I-NEXT:    call __muldi3@plt
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli64_m4352:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a2, 1048575
; RV32IM-NEXT:    addi a2, a2, -256
; RV32IM-NEXT:    mul a1, a1, a2
; RV32IM-NEXT:    mulhu a3, a0, a2
; RV32IM-NEXT:    sub a3, a3, a0
; RV32IM-NEXT:    add a1, a3, a1
; RV32IM-NEXT:    mul a0, a0, a2
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli64_m4352:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    lui a1, 1048575
; RV64I-NEXT:    addiw a1, a1, -256
; RV64I-NEXT:    call __muldi3@plt
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli64_m4352:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 1048575
; RV64IM-NEXT:    addiw a1, a1, -256
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i64 %a, -4352
  ret i64 %1
}

define i64 @muli64_m3840(i64 %a) nounwind {
; RV32I-LABEL: muli64_m3840:
; RV32I:       # %bb.0:
; RV32I-NEXT:    srli a2, a0, 20
; RV32I-NEXT:    slli a3, a1, 12
; RV32I-NEXT:    or a2, a3, a2
; RV32I-NEXT:    srli a3, a0, 24
; RV32I-NEXT:    slli a1, a1, 8
; RV32I-NEXT:    or a1, a1, a3
; RV32I-NEXT:    sub a1, a1, a2
; RV32I-NEXT:    slli a2, a0, 12
; RV32I-NEXT:    slli a0, a0, 8
; RV32I-NEXT:    sltu a3, a0, a2
; RV32I-NEXT:    sub a1, a1, a3
; RV32I-NEXT:    sub a0, a0, a2
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli64_m3840:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    lui a2, 1048575
; RV32IM-NEXT:    addi a2, a2, 256
; RV32IM-NEXT:    mul a1, a1, a2
; RV32IM-NEXT:    mulhu a3, a0, a2
; RV32IM-NEXT:    sub a3, a3, a0
; RV32IM-NEXT:    add a1, a3, a1
; RV32IM-NEXT:    mul a0, a0, a2
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli64_m3840:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slli a1, a0, 12
; RV64I-NEXT:    slli a0, a0, 8
; RV64I-NEXT:    sub a0, a0, a1
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli64_m3840:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a1, 1048575
; RV64IM-NEXT:    addiw a1, a1, 256
; RV64IM-NEXT:    mul a0, a0, a1
; RV64IM-NEXT:    ret
  %1 = mul i64 %a, -3840
  ret i64 %1
}

define i128 @muli128_m3840(i128 %a) nounwind {
; RV32I-LABEL: muli128_m3840:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lw a4, 4(a1)
; RV32I-NEXT:    lw a3, 8(a1)
; RV32I-NEXT:    lw a6, 0(a1)
; RV32I-NEXT:    lw a5, 12(a1)
; RV32I-NEXT:    srli a1, a4, 20
; RV32I-NEXT:    slli a2, a3, 12
; RV32I-NEXT:    or a1, a2, a1
; RV32I-NEXT:    srli a2, a4, 24
; RV32I-NEXT:    slli a7, a3, 8
; RV32I-NEXT:    or a2, a7, a2
; RV32I-NEXT:    sltu t0, a2, a1
; RV32I-NEXT:    srli a7, a3, 20
; RV32I-NEXT:    slli t1, a5, 12
; RV32I-NEXT:    or a7, t1, a7
; RV32I-NEXT:    srli a3, a3, 24
; RV32I-NEXT:    slli a5, a5, 8
; RV32I-NEXT:    or a3, a5, a3
; RV32I-NEXT:    sub t1, a3, a7
; RV32I-NEXT:    srli a3, a6, 20
; RV32I-NEXT:    slli a5, a4, 12
; RV32I-NEXT:    or a3, a5, a3
; RV32I-NEXT:    srli a5, a6, 24
; RV32I-NEXT:    slli a4, a4, 8
; RV32I-NEXT:    or a5, a4, a5
; RV32I-NEXT:    slli a4, a6, 12
; RV32I-NEXT:    slli a6, a6, 8
; RV32I-NEXT:    sltu a7, a6, a4
; RV32I-NEXT:    sub t0, t1, t0
; RV32I-NEXT:    mv t1, a7
; RV32I-NEXT:    beq a5, a3, .LBB30_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    sltu t1, a5, a3
; RV32I-NEXT:  .LBB30_2:
; RV32I-NEXT:    sub a1, a2, a1
; RV32I-NEXT:    sltu a2, a1, t1
; RV32I-NEXT:    sub a2, t0, a2
; RV32I-NEXT:    sub a1, a1, t1
; RV32I-NEXT:    sub a3, a5, a3
; RV32I-NEXT:    sub a3, a3, a7
; RV32I-NEXT:    sub a4, a6, a4
; RV32I-NEXT:    sw a4, 0(a0)
; RV32I-NEXT:    sw a3, 4(a0)
; RV32I-NEXT:    sw a1, 8(a0)
; RV32I-NEXT:    sw a2, 12(a0)
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli128_m3840:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    lw a2, 12(a1)
; RV32IM-NEXT:    lw a3, 8(a1)
; RV32IM-NEXT:    lw a4, 0(a1)
; RV32IM-NEXT:    lw a1, 4(a1)
; RV32IM-NEXT:    lui a5, 1048575
; RV32IM-NEXT:    addi a5, a5, 256
; RV32IM-NEXT:    mulhu a6, a4, a5
; RV32IM-NEXT:    mul a7, a1, a5
; RV32IM-NEXT:    add a6, a7, a6
; RV32IM-NEXT:    sltu a7, a6, a7
; RV32IM-NEXT:    mulhu t0, a1, a5
; RV32IM-NEXT:    add a7, t0, a7
; RV32IM-NEXT:    sub a6, a6, a4
; RV32IM-NEXT:    neg t0, a4
; RV32IM-NEXT:    sltu t1, a6, t0
; RV32IM-NEXT:    li t2, -1
; RV32IM-NEXT:    mulhu t3, a4, t2
; RV32IM-NEXT:    add t1, t3, t1
; RV32IM-NEXT:    add t1, a7, t1
; RV32IM-NEXT:    sub t4, t1, a1
; RV32IM-NEXT:    mul t5, a3, a5
; RV32IM-NEXT:    sub t5, t5, a4
; RV32IM-NEXT:    add t6, t4, t5
; RV32IM-NEXT:    sltu s0, t6, t4
; RV32IM-NEXT:    neg s1, a1
; RV32IM-NEXT:    sltu t4, t4, s1
; RV32IM-NEXT:    sltu a7, t1, a7
; RV32IM-NEXT:    mulhu t1, a1, t2
; RV32IM-NEXT:    add a7, t1, a7
; RV32IM-NEXT:    add a7, a7, t4
; RV32IM-NEXT:    sltu t0, t5, t0
; RV32IM-NEXT:    mul a2, a2, a5
; RV32IM-NEXT:    mulhu t1, a3, a5
; RV32IM-NEXT:    sub a3, t1, a3
; RV32IM-NEXT:    add a2, a3, a2
; RV32IM-NEXT:    sub a3, t3, a4
; RV32IM-NEXT:    sub a1, a3, a1
; RV32IM-NEXT:    add a1, a1, a2
; RV32IM-NEXT:    add a1, a1, t0
; RV32IM-NEXT:    add a1, a7, a1
; RV32IM-NEXT:    add a1, a1, s0
; RV32IM-NEXT:    mul a2, a4, a5
; RV32IM-NEXT:    sw a2, 0(a0)
; RV32IM-NEXT:    sw a6, 4(a0)
; RV32IM-NEXT:    sw t6, 8(a0)
; RV32IM-NEXT:    sw a1, 12(a0)
; RV32IM-NEXT:    lw s0, 12(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    lw s1, 8(sp) # 4-byte Folded Reload
; RV32IM-NEXT:    addi sp, sp, 16
; RV32IM-NEXT:    ret
;
; RV64I-LABEL: muli128_m3840:
; RV64I:       # %bb.0:
; RV64I-NEXT:    srli a2, a0, 52
; RV64I-NEXT:    slli a3, a1, 12
; RV64I-NEXT:    or a2, a3, a2
; RV64I-NEXT:    srli a3, a0, 56
; RV64I-NEXT:    slli a1, a1, 8
; RV64I-NEXT:    or a1, a1, a3
; RV64I-NEXT:    sub a1, a1, a2
; RV64I-NEXT:    slli a2, a0, 12
; RV64I-NEXT:    slli a0, a0, 8
; RV64I-NEXT:    sltu a3, a0, a2
; RV64I-NEXT:    sub a1, a1, a3
; RV64I-NEXT:    sub a0, a0, a2
; RV64I-NEXT:    ret
;
; RV64IM-LABEL: muli128_m3840:
; RV64IM:       # %bb.0:
; RV64IM-NEXT:    lui a2, 1048575
; RV64IM-NEXT:    addiw a2, a2, 256
; RV64IM-NEXT:    mul a1, a1, a2
; RV64IM-NEXT:    mulhu a3, a0, a2
; RV64IM-NEXT:    sub a3, a3, a0
; RV64IM-NEXT:    add a1, a3, a1
; RV64IM-NEXT:    mul a0, a0, a2
; RV64IM-NEXT:    ret
  %1 = mul i128 %a, -3840
  ret i128 %1
}

define i128 @muli128_m63(i128 %a) nounwind {
; RV32I-LABEL: muli128_m63:
; RV32I:       # %bb.0:
; RV32I-NEXT:    lw a2, 0(a1)
; RV32I-NEXT:    lw a5, 12(a1)
; RV32I-NEXT:    lw a7, 8(a1)
; RV32I-NEXT:    lw a3, 4(a1)
; RV32I-NEXT:    slli a1, a2, 6
; RV32I-NEXT:    sltu a4, a2, a1
; RV32I-NEXT:    srli a6, a2, 26
; RV32I-NEXT:    slli t0, a3, 6
; RV32I-NEXT:    or a6, t0, a6
; RV32I-NEXT:    mv t0, a4
; RV32I-NEXT:    beq a3, a6, .LBB31_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    sltu t0, a3, a6
; RV32I-NEXT:  .LBB31_2:
; RV32I-NEXT:    srli t1, a3, 26
; RV32I-NEXT:    slli t2, a7, 6
; RV32I-NEXT:    or t1, t2, t1
; RV32I-NEXT:    sub t2, a7, t1
; RV32I-NEXT:    sltu t3, t2, t0
; RV32I-NEXT:    sltu t1, a7, t1
; RV32I-NEXT:    srli a7, a7, 26
; RV32I-NEXT:    slli t4, a5, 6
; RV32I-NEXT:    or a7, t4, a7
; RV32I-NEXT:    sub a5, a5, a7
; RV32I-NEXT:    sub a5, a5, t1
; RV32I-NEXT:    sub a5, a5, t3
; RV32I-NEXT:    sub a7, t2, t0
; RV32I-NEXT:    sub a3, a3, a6
; RV32I-NEXT:    sub a3, a3, a4
; RV32I-NEXT:    sub a1, a2, a1
; RV32I-NEXT:    sw a1, 0(a0)
; RV32I-NEXT:    sw a3, 4(a0)
; RV32I-NEXT:    sw a7, 8(a0)
; RV32I-NEXT:    sw a5, 12(a0)
; RV32I-NEXT:    ret
;
; RV32IM-LABEL: muli128_m63:
; RV32IM:       # %bb.0:
; RV32IM-NEXT:    addi sp, sp, -16
; RV32IM-NEXT:    sw s0, 12(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    sw s1, 8(sp) # 4-byte Folded Spill
; RV32IM-NEXT:    lw a2, 12(a1)
; RV32IM-NEXT:    lw a3, 0(a1)
; RV32IM-NEXT:    lw a4, 4(a1)
; RV32IM-NEXT:    lw a1, 8(a1)
; RV32IM-NEXT:    li a5, -63
; RV32IM-NEXT:    mulhu a6, a3, a5
; RV32IM-NEXT:    slli a7, a4, 6
; RV32IM-NEXT:    sub a7, a7, a4
; RV32IM-NEXT:    sub a6, a6, a7
; RV32IM-NEXT:    neg a7, a7
; RV32IM-NEXT:    sltu a7, a6, a7
; RV32IM-NEXT:    mulhu t0, a4, a5
; RV32IM-NEXT:    add a7, t0, a7
; RV32IM-NEXT:    sub a6, a6, a3
; RV32IM-NEXT:    neg t0, a3
; RV32IM-NEXT:    sltu t1, a6, t0
; RV32IM-NEXT:    li t2, -1
; RV32IM-NEXT:    mulhu t3, a3, t2
; RV32IM-NEXT:    add t1, t3, t1
; RV32IM-NEXT:    add t1, a7, t1
; RV32IM-NEXT:    sub t4, t1, a4
; RV32IM-NEXT:    slli t5, a1, 6
; RV32IM-NEXT:    sub t5, t5, a1
; RV32IM-NEXT:    add t5, t5, a3
; RV32IM-NEXT:    sub t6, t4, t5
; RV32IM-NEXT:    sltu s0, t6, t4
; RV32IM-NEXT:    neg s1, a4
; RV32IM-NEXT:    sltu t4, t4, s1
; RV32IM-NEXT:    sltu a7, t1, a7
; RV32IM-NEXT:    mulhu t1, a4, t2
; RV32IM-NEXT:    add a7, t1, a7
; RV32IM-NEXT:    add a7, a7, t4
; RV32IM-NEXT:    slli t1, a2, 6
; RV32IM-NEXT:    sub a2, a2, t1
; RV32IM-NEXT:    mulhu a5, a1, a5
; RV32IM-NEXT:    sub a1, a5, a1
; RV32IM-NEXT:    add a1, a1, a2
; RV32IM-NEXT:    sub a2, t3, a3
; RV32IM-NEXT:    sub a2, a2, a4
;
RV32IM-NEXT: add a1, a2, a1 1360; RV32IM-NEXT: neg a2, t5 1361; RV32IM-NEXT: sltu a2, a2, t0 1362; RV32IM-NEXT: add a1, a1, a2 1363; RV32IM-NEXT: add a1, a7, a1 1364; RV32IM-NEXT: add a1, a1, s0 1365; RV32IM-NEXT: slli a2, a3, 6 1366; RV32IM-NEXT: sub a2, a3, a2 1367; RV32IM-NEXT: sw a2, 0(a0) 1368; RV32IM-NEXT: sw a6, 4(a0) 1369; RV32IM-NEXT: sw t6, 8(a0) 1370; RV32IM-NEXT: sw a1, 12(a0) 1371; RV32IM-NEXT: lw s0, 12(sp) # 4-byte Folded Reload 1372; RV32IM-NEXT: lw s1, 8(sp) # 4-byte Folded Reload 1373; RV32IM-NEXT: addi sp, sp, 16 1374; RV32IM-NEXT: ret 1375; 1376; RV64I-LABEL: muli128_m63: 1377; RV64I: # %bb.0: 1378; RV64I-NEXT: slli a2, a0, 6 1379; RV64I-NEXT: sltu a3, a0, a2 1380; RV64I-NEXT: srli a4, a0, 58 1381; RV64I-NEXT: slli a5, a1, 6 1382; RV64I-NEXT: or a4, a5, a4 1383; RV64I-NEXT: sub a1, a1, a4 1384; RV64I-NEXT: sub a1, a1, a3 1385; RV64I-NEXT: sub a0, a0, a2 1386; RV64I-NEXT: ret 1387; 1388; RV64IM-LABEL: muli128_m63: 1389; RV64IM: # %bb.0: 1390; RV64IM-NEXT: slli a2, a1, 6 1391; RV64IM-NEXT: sub a1, a1, a2 1392; RV64IM-NEXT: li a2, -63 1393; RV64IM-NEXT: mulhu a2, a0, a2 1394; RV64IM-NEXT: sub a2, a2, a0 1395; RV64IM-NEXT: add a1, a2, a1 1396; RV64IM-NEXT: slli a2, a0, 6 1397; RV64IM-NEXT: sub a0, a0, a2 1398; RV64IM-NEXT: ret 1399 %1 = mul i128 %a, -63 1400 ret i128 %1 1401} 1402 1403define i64 @mulhsu_i64(i64 %a, i64 %b) nounwind { 1404; RV32I-LABEL: mulhsu_i64: 1405; RV32I: # %bb.0: 1406; RV32I-NEXT: addi sp, sp, -48 1407; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill 1408; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill 1409; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill 1410; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill 1411; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill 1412; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill 1413; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill 1414; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill 1415; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill 1416; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill 
1417; RV32I-NEXT: sw s9, 4(sp) # 4-byte Folded Spill 1418; RV32I-NEXT: mv s2, a3 1419; RV32I-NEXT: mv s3, a2 1420; RV32I-NEXT: mv s0, a1 1421; RV32I-NEXT: mv s1, a0 1422; RV32I-NEXT: srai s4, a3, 31 1423; RV32I-NEXT: li a1, 0 1424; RV32I-NEXT: li a3, 0 1425; RV32I-NEXT: call __muldi3@plt 1426; RV32I-NEXT: mv s5, a1 1427; RV32I-NEXT: mv a0, s0 1428; RV32I-NEXT: li a1, 0 1429; RV32I-NEXT: mv a2, s3 1430; RV32I-NEXT: li a3, 0 1431; RV32I-NEXT: call __muldi3@plt 1432; RV32I-NEXT: add s5, a0, s5 1433; RV32I-NEXT: sltu a0, s5, a0 1434; RV32I-NEXT: add s7, a1, a0 1435; RV32I-NEXT: mv a0, s1 1436; RV32I-NEXT: li a1, 0 1437; RV32I-NEXT: mv a2, s2 1438; RV32I-NEXT: li a3, 0 1439; RV32I-NEXT: call __muldi3@plt 1440; RV32I-NEXT: add a2, a0, s5 1441; RV32I-NEXT: sltu a0, a2, a0 1442; RV32I-NEXT: add a0, a1, a0 1443; RV32I-NEXT: add s8, s7, a0 1444; RV32I-NEXT: mv a0, s0 1445; RV32I-NEXT: li a1, 0 1446; RV32I-NEXT: mv a2, s2 1447; RV32I-NEXT: li a3, 0 1448; RV32I-NEXT: call __muldi3@plt 1449; RV32I-NEXT: mv s5, a0 1450; RV32I-NEXT: mv s6, a1 1451; RV32I-NEXT: add s9, a0, s8 1452; RV32I-NEXT: mv a0, s3 1453; RV32I-NEXT: mv a1, s2 1454; RV32I-NEXT: li a2, 0 1455; RV32I-NEXT: li a3, 0 1456; RV32I-NEXT: call __muldi3@plt 1457; RV32I-NEXT: mv s2, a0 1458; RV32I-NEXT: mv s3, a1 1459; RV32I-NEXT: mv a0, s4 1460; RV32I-NEXT: mv a1, s4 1461; RV32I-NEXT: mv a2, s1 1462; RV32I-NEXT: mv a3, s0 1463; RV32I-NEXT: call __muldi3@plt 1464; RV32I-NEXT: add a3, a0, s2 1465; RV32I-NEXT: add a2, s9, a3 1466; RV32I-NEXT: sltu a4, a2, s9 1467; RV32I-NEXT: sltu a5, s9, s5 1468; RV32I-NEXT: sltu a6, s8, s7 1469; RV32I-NEXT: add a6, s6, a6 1470; RV32I-NEXT: add a5, a6, a5 1471; RV32I-NEXT: add a1, a1, s3 1472; RV32I-NEXT: sltu a0, a3, a0 1473; RV32I-NEXT: add a0, a1, a0 1474; RV32I-NEXT: add a0, a5, a0 1475; RV32I-NEXT: add a1, a0, a4 1476; RV32I-NEXT: mv a0, a2 1477; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload 1478; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload 1479; RV32I-NEXT: lw s1, 36(sp) # 
4-byte Folded Reload 1480; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload 1481; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload 1482; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload 1483; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload 1484; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload 1485; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload 1486; RV32I-NEXT: lw s8, 8(sp) # 4-byte Folded Reload 1487; RV32I-NEXT: lw s9, 4(sp) # 4-byte Folded Reload 1488; RV32I-NEXT: addi sp, sp, 48 1489; RV32I-NEXT: ret 1490; 1491; RV32IM-LABEL: mulhsu_i64: 1492; RV32IM: # %bb.0: 1493; RV32IM-NEXT: srai a4, a3, 31 1494; RV32IM-NEXT: mulhu a5, a0, a2 1495; RV32IM-NEXT: mul a6, a1, a2 1496; RV32IM-NEXT: add a5, a6, a5 1497; RV32IM-NEXT: sltu a6, a5, a6 1498; RV32IM-NEXT: mulhu a2, a1, a2 1499; RV32IM-NEXT: add a6, a2, a6 1500; RV32IM-NEXT: mul a2, a0, a3 1501; RV32IM-NEXT: add a5, a2, a5 1502; RV32IM-NEXT: sltu a2, a5, a2 1503; RV32IM-NEXT: mulhu a5, a0, a3 1504; RV32IM-NEXT: add a2, a5, a2 1505; RV32IM-NEXT: add a5, a6, a2 1506; RV32IM-NEXT: mul a7, a1, a3 1507; RV32IM-NEXT: add t0, a7, a5 1508; RV32IM-NEXT: mul t1, a4, a0 1509; RV32IM-NEXT: add a2, t0, t1 1510; RV32IM-NEXT: sltu t2, a2, t0 1511; RV32IM-NEXT: sltu a7, t0, a7 1512; RV32IM-NEXT: sltu a5, a5, a6 1513; RV32IM-NEXT: mulhu a3, a1, a3 1514; RV32IM-NEXT: add a3, a3, a5 1515; RV32IM-NEXT: add a3, a3, a7 1516; RV32IM-NEXT: mul a1, a4, a1 1517; RV32IM-NEXT: mulhu a0, a4, a0 1518; RV32IM-NEXT: add a0, a0, a1 1519; RV32IM-NEXT: add a0, a0, t1 1520; RV32IM-NEXT: add a0, a3, a0 1521; RV32IM-NEXT: add a1, a0, t2 1522; RV32IM-NEXT: mv a0, a2 1523; RV32IM-NEXT: ret 1524; 1525; RV64I-LABEL: mulhsu_i64: 1526; RV64I: # %bb.0: 1527; RV64I-NEXT: addi sp, sp, -16 1528; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 1529; RV64I-NEXT: mv a2, a1 1530; RV64I-NEXT: srai a3, a1, 63 1531; RV64I-NEXT: li a1, 0 1532; RV64I-NEXT: call __multi3@plt 1533; RV64I-NEXT: mv a0, a1 1534; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 1535; 
RV64I-NEXT: addi sp, sp, 16 1536; RV64I-NEXT: ret 1537; 1538; RV64IM-LABEL: mulhsu_i64: 1539; RV64IM: # %bb.0: 1540; RV64IM-NEXT: mulhsu a0, a1, a0 1541; RV64IM-NEXT: ret 1542 %1 = zext i64 %a to i128 1543 %2 = sext i64 %b to i128 1544 %3 = mul i128 %1, %2 1545 %4 = lshr i128 %3, 64 1546 %5 = trunc i128 %4 to i64 1547 ret i64 %5 1548} 1549 1550define i8 @muladd_demand(i8 %x, i8 %y) nounwind { 1551; RV32I-LABEL: muladd_demand: 1552; RV32I: # %bb.0: 1553; RV32I-NEXT: slli a0, a0, 1 1554; RV32I-NEXT: sub a0, a1, a0 1555; RV32I-NEXT: andi a0, a0, 15 1556; RV32I-NEXT: ret 1557; 1558; RV32IM-LABEL: muladd_demand: 1559; RV32IM: # %bb.0: 1560; RV32IM-NEXT: slli a0, a0, 1 1561; RV32IM-NEXT: sub a0, a1, a0 1562; RV32IM-NEXT: andi a0, a0, 15 1563; RV32IM-NEXT: ret 1564; 1565; RV64I-LABEL: muladd_demand: 1566; RV64I: # %bb.0: 1567; RV64I-NEXT: slliw a0, a0, 1 1568; RV64I-NEXT: subw a0, a1, a0 1569; RV64I-NEXT: andi a0, a0, 15 1570; RV64I-NEXT: ret 1571; 1572; RV64IM-LABEL: muladd_demand: 1573; RV64IM: # %bb.0: 1574; RV64IM-NEXT: slliw a0, a0, 1 1575; RV64IM-NEXT: subw a0, a1, a0 1576; RV64IM-NEXT: andi a0, a0, 15 1577; RV64IM-NEXT: ret 1578 %m = mul i8 %x, 14 1579 %a = add i8 %y, %m 1580 %r = and i8 %a, 15 1581 ret i8 %r 1582} 1583 1584define i8 @mulsub_demand(i8 %x, i8 %y) nounwind { 1585; RV32I-LABEL: mulsub_demand: 1586; RV32I: # %bb.0: 1587; RV32I-NEXT: slli a0, a0, 1 1588; RV32I-NEXT: add a0, a1, a0 1589; RV32I-NEXT: andi a0, a0, 15 1590; RV32I-NEXT: ret 1591; 1592; RV32IM-LABEL: mulsub_demand: 1593; RV32IM: # %bb.0: 1594; RV32IM-NEXT: slli a0, a0, 1 1595; RV32IM-NEXT: add a0, a1, a0 1596; RV32IM-NEXT: andi a0, a0, 15 1597; RV32IM-NEXT: ret 1598; 1599; RV64I-LABEL: mulsub_demand: 1600; RV64I: # %bb.0: 1601; RV64I-NEXT: slliw a0, a0, 1 1602; RV64I-NEXT: addw a0, a1, a0 1603; RV64I-NEXT: andi a0, a0, 15 1604; RV64I-NEXT: ret 1605; 1606; RV64IM-LABEL: mulsub_demand: 1607; RV64IM: # %bb.0: 1608; RV64IM-NEXT: slliw a0, a0, 1 1609; RV64IM-NEXT: addw a0, a1, a0 1610; 
RV64IM-NEXT: andi a0, a0, 15 1611; RV64IM-NEXT: ret 1612 %m = mul i8 %x, 14 1613 %a = sub i8 %y, %m 1614 %r = and i8 %a, 15 1615 ret i8 %r 1616} 1617