//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
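  // XLenVT is MVT::i32 on RV32 and MVT::i64 on RV64, so this single call
  // covers the scalar integer type for either base ISA.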
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  static const MVT::SimpleValueType BoolVecVTs[] = {
      MVT::nxv1i1,  MVT::nxv2i1,  MVT::nxv4i1, MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
  static const MVT::SimpleValueType IntVecVTs[] = {
      MVT::nxv1i8,  MVT::nxv2i8,   MVT::nxv4i8,   MVT::nxv8i8,  MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8,  MVT::nxv1i16,  MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32,  MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};
  static const MVT::SimpleValueType F16VecVTs[] = {
      MVT::nxv1f16, MVT::nxv2f16,  MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
  static const MVT::SimpleValueType F32VecVTs[] = {
      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
  static const MVT::SimpleValueType F64VecVTs[] = {
      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};

  if (Subtarget.hasVInstructions()) {
    auto addRegClassForRVV = [this](MVT VT) {
      unsigned Size = VT.getSizeInBits().getKnownMinValue();
      assert(Size <= 512 && isPowerOf2_32(Size));
      const TargetRegisterClass *RC;
      if (Size <= 64)
        RC = &RISCV::VRRegClass;
      else if (Size == 128)
        RC = &RISCV::VRM2RegClass;
      else if (Size == 256)
        RC = &RISCV::VRM4RegClass;
      else
        RC = &RISCV::VRM8RegClass;

      addRegisterClass(VT, RC);
    };

    for (MVT VT : BoolVecVTs)
      addRegClassForRVV(VT);
    for (MVT VT : IntVecVTs) {
      if (VT.getVectorElementType() == MVT::i64 &&
          !Subtarget.hasVInstructionsI64())
        continue;
      addRegClassForRVV(VT);
    }

    if (Subtarget.hasVInstructionsF16())
      for (MVT VT : F16VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasVInstructionsF32())
      for (MVT VT : F32VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasVInstructionsF64())
      for (MVT VT : F64VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      auto addRegClassForFixedVectors = [this](MVT VT) {
        MVT ContainerVT = getContainerForFixedLengthVector(VT);
        unsigned RCID = getRegClassIDForVecVT(ContainerVT);
        const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
        addRegisterClass(VT, TRI.getRegClass(RCID));
      };
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, XLenVT,
                   MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction({ISD::STACKSAVE, ISD::STACKRESTORE}, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction({ISD::VAARG, ISD::VACOPY, ISD::VAEND}, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget.hasStdExtZbb())
    setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::i8, MVT::i16}, Expand);

  if (Subtarget.is64Bit()) {
    setOperationAction({ISD::ADD, ISD::SUB, ISD::SHL, ISD::SRA, ISD::SRL},
                       MVT::i32, Custom);

    setOperationAction({ISD::UADDO, ISD::USUBO, ISD::UADDSAT, ISD::USUBSAT},
                       MVT::i32, Custom);
  } else {
    setLibcallName(
        {RTLIB::SHL_I128, RTLIB::SRL_I128, RTLIB::SRA_I128, RTLIB::MUL_I128},
        nullptr);
    setLibcallName(RTLIB::MULO_I64, nullptr);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction({ISD::MUL, ISD::MULHS, ISD::MULHU, ISD::SDIV, ISD::UDIV,
                        ISD::SREM, ISD::UREM},
                       XLenVT, Expand);
  } else {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::MUL, {MVT::i32, MVT::i128}, Custom);

      setOperationAction({ISD::SDIV, ISD::UDIV, ISD::UREM},
                         {MVT::i8, MVT::i16, MVT::i32}, Custom);
    } else {
      setOperationAction(ISD::MUL, MVT::i64, Custom);
    }
  }

  setOperationAction(
      {ISD::SDIVREM, ISD::UDIVREM, ISD::SMUL_LOHI, ISD::UMUL_LOHI}, XLenVT,
      Expand);

  setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, XLenVT,
                     Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
      Subtarget.hasStdExtZbkb()) {
    if (Subtarget.is64Bit())
      setOperationAction({ISD::ROTL, ISD::ROTR}, MVT::i32, Custom);
  } else {
    setOperationAction({ISD::ROTL, ISD::ROTR}, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
    // more combining.
    setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, XLenVT, Custom);

    // BSWAP i8 doesn't exist.
    setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);

    setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i16, Custom);

    if (Subtarget.is64Bit())
      setOperationAction({ISD::BITREVERSE, ISD::BSWAP}, MVT::i32, Custom);
  } else {
    // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
    // pattern match it directly in isel.
    setOperationAction(ISD::BSWAP, XLenVT,
                       (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb())
                           ? Legal
                           : Expand);
    // Zbkb can use rev8+brev8 to implement bitreverse.
    setOperationAction(ISD::BITREVERSE, XLenVT,
                       Subtarget.hasStdExtZbkb() ? Custom : Expand);
  }

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, XLenVT,
                       Legal);

    if (Subtarget.is64Bit())
      setOperationAction(
          {ISD::CTTZ, ISD::CTTZ_ZERO_UNDEF, ISD::CTLZ, ISD::CTLZ_ZERO_UNDEF},
          MVT::i32, Custom);
  } else {
    setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP}, XLenVT, Expand);

    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction({ISD::FSHL, ISD::FSHR}, XLenVT, Custom);
    setOperationAction(ISD::SELECT, XLenVT, Legal);

    if (Subtarget.is64Bit())
      setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::SELECT, XLenVT, Custom);
  }

  static constexpr ISD::NodeType FPLegalNodeTypes[] = {
      ISD::FMINNUM,        ISD::FMAXNUM,       ISD::LRINT,
      ISD::LLRINT,         ISD::LROUND,        ISD::LLROUND,
      ISD::STRICT_LRINT,   ISD::STRICT_LLRINT, ISD::STRICT_LROUND,
      ISD::STRICT_LLROUND, ISD::STRICT_FMA,    ISD::STRICT_FADD,
      ISD::STRICT_FSUB,    ISD::STRICT_FMUL,   ISD::STRICT_FDIV,
      ISD::STRICT_FSQRT,   ISD::STRICT_FSETCC, ISD::STRICT_FSETCCS};

  static const ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE,  ISD::SETO,   ISD::SETUO};

  static const ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,   ISD::FPOW,
      ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh())
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);

  if (Subtarget.hasStdExtZfh()) {
    for (auto NT : FPLegalNodeTypes)
      setOperationAction(NT, MVT::f16, Legal);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);

    setOperationAction({ISD::FREM, ISD::FCEIL, ISD::FFLOOR, ISD::FNEARBYINT,
                        ISD::FRINT, ISD::FROUND, ISD::FROUNDEVEN, ISD::FTRUNC,
                        ISD::FPOW, ISD::FPOWI, ISD::FCOS, ISD::FSIN,
                        ISD::FSINCOS, ISD::FEXP, ISD::FEXP2, ISD::FLOG,
                        ISD::FLOG2, ISD::FLOG10},
                       MVT::f16, Promote);

    // FIXME: Need to promote f16 STRICT_* to f32 libcalls, but we don't have
    // complete support for all operations in LegalizeDAG.

    // We need to custom promote this.
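    // (Presumably so the i32 exponent operand of FPOWI can be sign-extended
    // before the powi libcall on RV64; default promotion would not guarantee
    // a correctly extended 'int' argument.)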
    if (Subtarget.is64Bit())
      setOperationAction(ISD::FPOWI, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtF()) {
    for (auto NT : FPLegalNodeTypes)
      setOperationAction(NT, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    for (auto NT : FPLegalNodeTypes)
      setOperationAction(NT, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit())
    setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT,
                        ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT},
                       MVT::i32, Custom);

  if (Subtarget.hasStdExtF()) {
    setOperationAction({ISD::FP_TO_UINT_SAT, ISD::FP_TO_SINT_SAT}, XLenVT,
                       Custom);

    setOperationAction({ISD::STRICT_FP_TO_UINT, ISD::STRICT_FP_TO_SINT,
                        ISD::STRICT_UINT_TO_FP, ISD::STRICT_SINT_TO_FP},
                       XLenVT, Legal);

    setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
    setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
  }

  setOperationAction({ISD::GlobalAddress, ISD::BlockAddress, ISD::ConstantPool,
                      ISD::JumpTable},
                     XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction({ISD::TRAP, ISD::DEBUGTRAP}, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasVInstructions()) {
    setBooleanVectorContents(ZeroOrOneBooleanContent);

    setOperationAction(ISD::VSCALE, XLenVT, Custom);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
                       {MVT::i8, MVT::i16}, Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
    else
      setOperationAction({ISD::INTRINSIC_WO_CHAIN, ISD::INTRINSIC_W_CHAIN},
                         MVT::i64, Custom);

    setOperationAction({ISD::INTRINSIC_W_CHAIN, ISD::INTRINSIC_VOID},
                       MVT::Other, Custom);

    static const unsigned IntegerVPOps[] = {
        ISD::VP_ADD,         ISD::VP_SUB,         ISD::VP_MUL,
        ISD::VP_SDIV,        ISD::VP_UDIV,        ISD::VP_SREM,
        ISD::VP_UREM,        ISD::VP_AND,         ISD::VP_OR,
        ISD::VP_XOR,         ISD::VP_ASHR,        ISD::VP_LSHR,
        ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
        ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
        ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
        ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FPTOSI,
        ISD::VP_FPTOUI,      ISD::VP_SETCC,       ISD::VP_SIGN_EXTEND,
        ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE};

    static const unsigned FloatingPointVPOps[] = {
        ISD::VP_FADD,        ISD::VP_FSUB,
        ISD::VP_FMUL,        ISD::VP_FDIV,
        ISD::VP_FNEG,        ISD::VP_FMA,
        ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
        ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX,
        ISD::VP_MERGE,       ISD::VP_SELECT,
        ISD::VP_SITOFP,      ISD::VP_UITOFP,
        ISD::VP_SETCC,       ISD::VP_FP_ROUND,
        ISD::VP_FP_EXTEND};

    if (!Subtarget.is64Bit()) {
      // We must custom-lower certain vXi64 operations on RV32 due to the
      // vector element type being illegal.
      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
                         MVT::i64, Custom);

      setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
                          ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
                          ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
                          ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
                         MVT::i64, Custom);

      setOperationAction({ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
                          ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR,
                          ISD::VP_REDUCE_SMAX, ISD::VP_REDUCE_SMIN,
                          ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN},
                         MVT::i64, Custom);
    }

    for (MVT VT : BoolVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);

      // Mask VTs are custom-expanded into a series of standard nodes
      setOperationAction({ISD::TRUNCATE, ISD::CONCAT_VECTORS,
                          ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
                         VT, Custom);

      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                         Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(
          {ISD::SELECT_CC, ISD::VSELECT, ISD::VP_MERGE, ISD::VP_SELECT}, VT,
          Expand);

      setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR}, VT, Custom);

      setOperationAction(
          {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
          Custom);

      setOperationAction(
          {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
          Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(
          {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
          VT, Custom);

      // Expand all extending loads to types larger than this, and truncating
      // stores from types larger than this.
      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(OtherVT, VT, Expand);
        setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
                         VT, Expand);
      }

      setOperationAction(
          {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_TRUNCATE, ISD::VP_SETCC}, VT,
          Custom);
    }

    for (MVT VT : IntVecVTs) {
      if (VT.getVectorElementType() == MVT::i64 &&
          !Subtarget.hasVInstructionsI64())
        continue;

      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);

      // Vectors implement MULHS/MULHU.
      setOperationAction({ISD::SMUL_LOHI, ISD::UMUL_LOHI}, VT, Expand);

      // nxvXi64 MULHS/MULHU requires the V extension instead of Zve64*.
      if (VT.getVectorElementType() == MVT::i64 && !Subtarget.hasStdExtV())
        setOperationAction({ISD::MULHU, ISD::MULHS}, VT, Expand);

      setOperationAction({ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX}, VT,
                         Legal);

      setOperationAction({ISD::ROTL, ISD::ROTR}, VT, Expand);

      setOperationAction({ISD::CTTZ, ISD::CTLZ, ISD::CTPOP, ISD::BSWAP}, VT,
                         Expand);

      // Custom-lower extensions and truncations from/to mask types.
      setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
                         VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(
          {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT},
          VT, Custom);

      setOperationAction(
          {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT, Legal);

      // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
      // nodes which truncate by one power of two at a time.
      setOperationAction(ISD::TRUNCATE, VT, Custom);

      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                         Custom);

      // Custom-lower reduction operations to set up the corresponding custom
      // nodes' operands.
      setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_AND,
                          ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR,
                          ISD::VECREDUCE_SMAX, ISD::VECREDUCE_SMIN,
                          ISD::VECREDUCE_UMAX, ISD::VECREDUCE_UMIN},
                         VT, Custom);

      for (unsigned VPOpc : IntegerVPOps)
        setOperationAction(VPOpc, VT, Custom);

      setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);

      setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
                         VT, Custom);

      setOperationAction(
          {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
          Custom);

      setOperationAction(
          {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
          VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction({ISD::STEP_VECTOR, ISD::VECTOR_REVERSE}, VT, Custom);

      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(VT, OtherVT, Expand);
        setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, OtherVT,
                         VT, Expand);
      }

      // Splice
      setOperationAction(ISD::VECTOR_SPLICE, VT, Custom);

      // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
      // type that can represent the value exactly.
      if (VT.getVectorElementType() != MVT::i64) {
        MVT FloatEltVT =
            VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
        EVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
        if (isTypeLegal(FloatVT)) {
          setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                             Custom);
        }
      }
    }

    // Expand various CCs to best match the RVV ISA, which natively supports
    // UNE but no other unordered comparisons, and supports all ordered
    // comparisons except ONE. Additionally, we expand GT,OGT,GE,OGE for
    // optimization purposes; they are expanded to their swapped-operand CCs
    // (LT,OLT,LE,OLE), and we pattern-match those back to the "original",
    // swapping operands once more. This way we catch both operations and both
    // "vf" and "fv" forms with fewer patterns.
    static const ISD::CondCode VFPCCToExpand[] = {
        ISD::SETO,   ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
        ISD::SETGT,  ISD::SETOGT, ISD::SETGE,  ISD::SETOGE,
    };

    // Sets common operation actions on RVV floating-point vector types.
    const auto SetCommonVFPActions = [&](MVT VT) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
      // sizes are within one power-of-two of each other. Therefore conversions
      // between vXf16 and vXf64 must be lowered as sequences which convert via
      // vXf32.
      setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);
      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT}, VT,
                         Custom);
      // Expand various condition codes (explained above).
      for (auto CC : VFPCCToExpand)
        setCondCodeAction(CC, VT, Expand);

      setOperationAction({ISD::FMINNUM, ISD::FMAXNUM}, VT, Legal);

      setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
                         VT, Custom);

      setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
                          ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
                         VT, Custom);

      setOperationAction(ISD::FCOPYSIGN, VT, Legal);

      setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);

      setOperationAction({ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER},
                         VT, Custom);

      setOperationAction(
          {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
          Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(
          {ISD::CONCAT_VECTORS, ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR},
          VT, Custom);

      setOperationAction({ISD::VECTOR_REVERSE, ISD::VECTOR_SPLICE}, VT, Custom);

      for (unsigned VPOpc : FloatingPointVPOps)
        setOperationAction(VPOpc, VT, Custom);
    };

    // Sets common extload/truncstore actions on RVV floating-point vector
    // types.
    const auto SetCommonVFPExtLoadTruncStoreActions =
        [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
          for (auto SmallVT : SmallerVTs) {
            setTruncStoreAction(VT, SmallVT, Expand);
            setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
          }
        };

    if (Subtarget.hasVInstructionsF16())
      for (MVT VT : F16VecVTs)
        SetCommonVFPActions(VT);

    for (MVT VT : F32VecVTs) {
      if (Subtarget.hasVInstructionsF32())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
    }

    for (MVT VT : F64VecVTs) {
      if (Subtarget.hasVInstructionsF64())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
      SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
    }

    if (Subtarget.useRVVForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
          setTruncStoreAction(VT, OtherVT, Expand);
          setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD},
                           OtherVT, VT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
                           Custom);

        setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS}, VT,
                           Custom);

        setOperationAction({ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT},
                           VT, Custom);

        setOperationAction({ISD::LOAD, ISD::STORE}, VT, Custom);

        setOperationAction(ISD::SETCC, VT, Custom);

        setOperationAction(ISD::SELECT, VT, Custom);

        setOperationAction(ISD::TRUNCATE, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(
            {ISD::VECREDUCE_AND, ISD::VECREDUCE_OR, ISD::VECREDUCE_XOR}, VT,
            Custom);

        setOperationAction(
            {ISD::VP_REDUCE_AND, ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR}, VT,
            Custom);

        setOperationAction({ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::FP_TO_SINT,
                            ISD::FP_TO_UINT},
                           VT, Custom);

        // Operations below are different between masks and other vectors.
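        // The mask (i1-element) case 'continue's out early below; everything
        // after it applies only to ordinary integer element types.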
        if (VT.getVectorElementType() == MVT::i1) {
          setOperationAction({ISD::VP_AND, ISD::VP_OR, ISD::VP_XOR, ISD::AND,
                              ISD::OR, ISD::XOR},
                             VT, Custom);

          setOperationAction(
              {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_SETCC, ISD::VP_TRUNCATE},
              VT, Custom);
          continue;
        }

        // Make SPLAT_VECTOR Legal so DAGCombine will convert splat vectors to
        // it before type legalization for i64 vectors on RV32. It will then be
        // type legalized to SPLAT_VECTOR_PARTS which we need to Custom handle.
        // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
        // improvements first.
        if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
          setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
          setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
        }

        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);

        setOperationAction(
            {ISD::MLOAD, ISD::MSTORE, ISD::MGATHER, ISD::MSCATTER}, VT, Custom);

        setOperationAction(
            {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
            Custom);

        setOperationAction({ISD::ADD, ISD::MUL, ISD::SUB, ISD::AND, ISD::OR,
                            ISD::XOR, ISD::SDIV, ISD::SREM, ISD::UDIV,
                            ISD::UREM, ISD::SHL, ISD::SRA, ISD::SRL},
                           VT, Custom);

        setOperationAction(
            {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX, ISD::ABS}, VT, Custom);

        // vXi64 MULHS/MULHU requires the V extension instead of Zve64*.
        if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
          setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);

        setOperationAction(
            {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT}, VT,
            Custom);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(
            {ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND}, VT, Custom);

        // Custom-lower reduction operations to set up the corresponding custom
        // nodes' operands.
        setOperationAction({ISD::VECREDUCE_ADD, ISD::VECREDUCE_SMAX,
                            ISD::VECREDUCE_SMIN, ISD::VECREDUCE_UMAX,
                            ISD::VECREDUCE_UMIN},
                           VT, Custom);

        for (unsigned VPOpc : IntegerVPOps)
          setOperationAction(VPOpc, VT, Custom);

        // Lower CTLZ_ZERO_UNDEF and CTTZ_ZERO_UNDEF if we have a floating point
        // type that can represent the value exactly.
        if (VT.getVectorElementType() != MVT::i64) {
          MVT FloatEltVT =
              VT.getVectorElementType() == MVT::i32 ? MVT::f64 : MVT::f32;
          EVT FloatVT =
              MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());
          if (isTypeLegal(FloatVT))
            setOperationAction({ISD::CTLZ_ZERO_UNDEF, ISD::CTTZ_ZERO_UNDEF}, VT,
                               Custom);
        }
      }

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setTruncStoreAction(VT, OtherVT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction({ISD::INSERT_SUBVECTOR, ISD::EXTRACT_SUBVECTOR}, VT,
                           Custom);

        setOperationAction({ISD::BUILD_VECTOR, ISD::CONCAT_VECTORS,
                            ISD::VECTOR_SHUFFLE, ISD::INSERT_VECTOR_ELT,
                            ISD::EXTRACT_VECTOR_ELT},
                           VT, Custom);

        setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
                            ISD::MGATHER, ISD::MSCATTER},
                           VT, Custom);

        setOperationAction(
            {ISD::VP_LOAD, ISD::VP_STORE, ISD::VP_GATHER, ISD::VP_SCATTER}, VT,
            Custom);

        setOperationAction({ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV,
                            ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN, ISD::FSQRT,
                            ISD::FMA, ISD::FMINNUM, ISD::FMAXNUM},
                           VT, Custom);

        setOperationAction({ISD::FP_ROUND, ISD::FP_EXTEND}, VT, Custom);

        setOperationAction({ISD::FTRUNC, ISD::FCEIL, ISD::FFLOOR, ISD::FROUND},
                           VT, Custom);

        for (auto CC : VFPCCToExpand)
          setCondCodeAction(CC, VT, Expand);

        setOperationAction({ISD::VSELECT, ISD::SELECT}, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction({ISD::VECREDUCE_FADD, ISD::VECREDUCE_SEQ_FADD,
                            ISD::VECREDUCE_FMIN, ISD::VECREDUCE_FMAX},
                           VT, Custom);

        for (unsigned VPOpc : FloatingPointVPOps)
          setOperationAction(VPOpc, VT, Custom);
      }

      // Custom-legalize bitcasts from fixed-length vectors to scalar types.
      setOperationAction(ISD::BITCAST, {MVT::i8, MVT::i16, MVT::i32, MVT::i64},
                         Custom);
      if (Subtarget.hasStdExtZfh())
        setOperationAction(ISD::BITCAST, MVT::f16, Custom);
      if (Subtarget.hasStdExtF())
        setOperationAction(ISD::BITCAST, MVT::f32, Custom);
      if (Subtarget.hasStdExtD())
        setOperationAction(ISD::BITCAST, MVT::f64, Custom);
    }
  }

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  setMinimumJumpTableEntries(5);

  // Jumps are expensive, compared to logic
  setJumpIsExpensive();

  setTargetDAGCombine({ISD::INTRINSIC_WO_CHAIN, ISD::ADD, ISD::SUB, ISD::AND,
                       ISD::OR, ISD::XOR});

  if (Subtarget.hasStdExtF())
    setTargetDAGCombine({ISD::FADD, ISD::FMAXNUM, ISD::FMINNUM});

  if (Subtarget.hasStdExtZbp())
    setTargetDAGCombine({ISD::ROTL, ISD::ROTR});

  if (Subtarget.hasStdExtZbb())
    setTargetDAGCombine({ISD::UMAX, ISD::UMIN, ISD::SMAX, ISD::SMIN});

  if (Subtarget.hasStdExtZbkb())
    setTargetDAGCombine(ISD::BITREVERSE);
  if (Subtarget.hasStdExtZfh() || Subtarget.hasStdExtZbb())
    setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
  if (Subtarget.hasStdExtF())
    setTargetDAGCombine({ISD::ZERO_EXTEND, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
                         ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT});
  if (Subtarget.hasVInstructions())
    setTargetDAGCombine({ISD::FCOPYSIGN, ISD::MGATHER, ISD::MSCATTER,
                         ISD::VP_GATHER, ISD::VP_SCATTER, ISD::SRA, ISD::SRL,
                         ISD::SHL, ISD::STORE, ISD::SPLAT_VECTOR});

  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
                                            LLVMContext &Context,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  if (Subtarget.hasVInstructions() &&
      (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
    return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
  return Subtarget.getXLenVT();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  auto &DL = I.getModule()->getDataLayout();
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  case Intrinsic::riscv_masked_strided_load:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = I.getArgOperand(1);
    Info.memVT = getValueType(DL, I.getType()->getScalarType());
    Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::riscv_masked_strided_store:
    Info.opc = ISD::INTRINSIC_VOID;
    Info.ptrVal = I.getArgOperand(1);
    Info.memVT =
        getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
    Info.align = Align(
        DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
        8);
    Info.size =
        MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOStore;
    return true;
  case Intrinsic::riscv_seg2_load:
  case Intrinsic::riscv_seg3_load:
  case Intrinsic::riscv_seg4_load:
  case Intrinsic::riscv_seg5_load:
  case Intrinsic::riscv_seg6_load:
  case Intrinsic::riscv_seg7_load:
  case Intrinsic::riscv_seg8_load:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = I.getArgOperand(0);
    Info.memVT = getValueType(
        DL, I.getType()->getStructElementType(0)->getScalarType());
    Info.align =
        Align(DL.getTypeSizeInBits(
                  I.getType()->getStructElementType(0)->getScalarType()) /
              8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOLoad;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // RVV instructions only support register addressing.
  if (Subtarget.hasVInstructions() && isa<VectorType>(Ty))
    return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  // Don't advertise i32->i64 zextload as being free for RV64. It interacts
  // poorly with type legalization of compares preferring sext.
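  // (An i8/i16 zero-extending load is a single lbu/lhu, so the zext costs
  // nothing.)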
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::signExtendConstant(const ConstantInt *CI) const {
  return Subtarget.is64Bit() && CI->getType()->isIntegerTy(32);
}

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
  EVT VT = Y.getValueType();

  // FIXME: Support vectors once we have tests.
  if (VT.isVector())
    return false;

  return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp() ||
          Subtarget.hasStdExtZbkb()) &&
         !isa<ConstantSDNode>(Y);
}

bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
  // We can use ANDI+SEQZ/SNEZ as a bit test. Y contains the bit position.
  auto *C = dyn_cast<ConstantSDNode>(Y);
  return C && C->getAPIntValue().ule(10);
}

bool RISCVTargetLowering::
    shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
        SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
        unsigned OldShiftOpcode, unsigned NewShiftOpcode,
        SelectionDAG &DAG) const {
  // One interesting pattern that we'd want to form is 'bit extract':
  //   ((1 >> Y) & 1) ==/!= 0
  // But we also need to be careful not to try to reverse that fold.

  // Is this '((1 >> Y) & 1)'?
  if (XC && OldShiftOpcode == ISD::SRL && XC->isOne())
    return false; // Keep the 'bit extract' pattern.

  // Will this be '((1 >> Y) & 1)' after the transform?
  if (NewShiftOpcode == ISD::SRL && CC->isOne())
    return true; // Do form the 'bit extract' pattern.

  // If 'X' is a constant, and we transform, then we will immediately
  // try to undo the fold, thus causing endless combine loop.
  // So only do the transform if X is not a constant. This matches the default
  // implementation of this function.
  return !XC;
}

/// Check if sinking \p I's operands to I's basic block is profitable, because
/// the operands can be folded into a target instruction, e.g.
/// splats of scalars can fold into vector instructions.
bool RISCVTargetLowering::shouldSinkOperands(
    Instruction *I, SmallVectorImpl<Use *> &Ops) const {
  using namespace llvm::PatternMatch;

  if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
    return false;

  auto IsSinker = [&](Instruction *I, int Operand) {
    switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
    case Instruction::ICmp:
    case Instruction::FCmp:
      return true;
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
    case Instruction::UDiv:
    case Instruction::SDiv:
    case Instruction::URem:
    case Instruction::SRem:
      return Operand == 1;
    case Instruction::Call:
      if (auto *II = dyn_cast<IntrinsicInst>(I)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::fma:
        case Intrinsic::vp_fma:
          return Operand == 0 || Operand == 1;
        // FIXME: Our patterns can only match vx/vf instructions when the splat
        // is on the RHS, because TableGen doesn't recognize our VP operations
        // as commutative.
        case Intrinsic::vp_add:
        case Intrinsic::vp_mul:
        case Intrinsic::vp_and:
        case Intrinsic::vp_or:
        case Intrinsic::vp_xor:
        case Intrinsic::vp_fadd:
        case Intrinsic::vp_fmul:
        case Intrinsic::vp_shl:
        case Intrinsic::vp_lshr:
        case Intrinsic::vp_ashr:
        case Intrinsic::vp_udiv:
        case Intrinsic::vp_sdiv:
        case Intrinsic::vp_urem:
        case Intrinsic::vp_srem:
          return Operand == 1;
        // ... with the exception of vp.sub/vp.fsub/vp.fdiv, which have
        // explicit patterns for both LHS and RHS (as 'vr' versions).
        case Intrinsic::vp_sub:
        case Intrinsic::vp_fsub:
        case Intrinsic::vp_fdiv:
          return Operand == 0 || Operand == 1;
        default:
          return false;
        }
      }
      return false;
    default:
      return false;
    }
  };

  for (auto OpIdx : enumerate(I->operands())) {
    if (!IsSinker(I, OpIdx.index()))
      continue;

    Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
    // Make sure we are not already sinking this operand.
    if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
      continue;

    // We are looking for a splat that can be sunk.
    if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
                             m_Undef(), m_ZeroMask())))
      continue;

    // All uses of the shuffle should be sunk to avoid duplicating it across
    // gpr and vector registers.
    for (Use &U : Op->uses()) {
      Instruction *Insn = cast<Instruction>(U.getUser());
      if (!IsSinker(Insn, U.getOperandNo()))
        return false;
    }

    Ops.push_back(&Op->getOperandUse(0));
    Ops.push_back(&OpIdx.value());
  }
  return true;
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
  if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
    return false;
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
  return Imm.isZero();
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
         (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                       CallingConv::ID CC,
                                                       EVT VT) const {
  // Use f32 to pass f16 if it is legal and Zfh is not enabled.
  // We might still end up using a GPR but that will be decided based on ABI.
  // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
    return MVT::f32;

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned RISCVTargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
  // Use f32 to pass f16 if it is legal and Zfh is not enabled.
  // We might still end up using a GPR but that will be decided based on ABI.
  // FIXME: Change to Zfhmin once f16 becomes a legal type with Zfhmin.
  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
    return 1;

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly by branches
// in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
// with 1/-1.
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
                                    ISD::CondCode &CC, SelectionDAG &DAG) {
  // Convert X > -1 to X >= 0.
  if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
    RHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }
  // Convert X < 1 to 0 >= X.
  if (CC == ISD::SETLT && isOneConstant(RHS)) {
    RHS = LHS;
    LHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }

  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
  assert(VT.isScalableVector() && "Expecting a scalable vector type");
  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
  // Treat an i1 vector as the i8 vector with the same element count, so the
  // size-keyed switch below also covers mask types.
  if (VT.getVectorElementType() == MVT::i1)
    KnownSize *= 8;

  switch (KnownSize) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case 8:
    return RISCVII::VLMUL::LMUL_F8;
  case 16:
    return RISCVII::VLMUL::LMUL_F4;
  case 32:
    return RISCVII::VLMUL::LMUL_F2;
  case 64:
    return RISCVII::VLMUL::LMUL_1;
  case 128:
    return RISCVII::VLMUL::LMUL_2;
  case 256:
    return RISCVII::VLMUL::LMUL_4;
  case 512:
    return RISCVII::VLMUL::LMUL_8;
  }
}

unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
  switch (LMul) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case RISCVII::VLMUL::LMUL_F8:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_1:
    return RISCV::VRRegClassID;
  case RISCVII::VLMUL::LMUL_2:
    return RISCV::VRM2RegClassID;
  case RISCVII::VLMUL::LMUL_4:
    return RISCV::VRM4RegClassID;
  case RISCVII::VLMUL::LMUL_8:
    return RISCV::VRM8RegClassID;
  }
}

unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
  RISCVII::VLMUL LMUL = getLMUL(VT);
  if (LMUL == RISCVII::VLMUL::LMUL_F8 || LMUL == RISCVII::VLMUL::LMUL_F4 ||
      LMUL == RISCVII::VLMUL::LMUL_F2 || LMUL == RISCVII::VLMUL::LMUL_1) {
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm1_0 + Index;
  }
  if (LMUL == RISCVII::VLMUL::LMUL_2) {
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm2_0 + Index;
  }
  if (LMUL == RISCVII::VLMUL::LMUL_4) {
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm4_0 + Index;
  }
  llvm_unreachable("Invalid vector type.");
}

unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
  if (VT.getVectorElementType() == MVT::i1)
    return RISCV::VRRegClassID;
  return getRegClassIDForLMUL(getLMUL(VT));
}

// Attempt to decompose a subvector insert/extract between VecVT and
// SubVecVT via subregister indices. Returns the subregister index that
// can perform the subvector insert/extract with the given element index, as
// well as the index corresponding to any leftover subvectors that must be
// further inserted/extracted within the register class for SubVecVT.
std::pair<unsigned, unsigned>
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
    MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
    const RISCVRegisterInfo *TRI) {
  static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
                 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
                 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
                "Register classes not ordered");
  unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
  unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
  // Try to compose a subregister index that takes us from the incoming
  // LMUL>1 register class down to the outgoing one. At each step we halve
  // the LMUL:
  //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
  // Note that this is not guaranteed to find a subregister index, such as
  // when we are extracting from one VR type to another.
  unsigned SubRegIdx = RISCV::NoSubRegister;
  for (const unsigned RCID :
       {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
    if (VecRegClassID > RCID && SubRegClassID <= RCID) {
      VecVT = VecVT.getHalfNumVectorElementsVT();
      bool IsHi =
          InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
      SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
                                            getSubregIndexByMVT(VecVT, IsHi));
      if (IsHi)
        InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
    }
  return {SubRegIdx, InsertExtractIdx};
}

// Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
// stores for those types.
bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
  return !Subtarget.useRVVForFixedLengthVectors() ||
         (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
}

bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
      ScalarTy->isIntegerTy(32))
    return true;

  if (ScalarTy->isIntegerTy(64))
    return Subtarget.hasVInstructionsI64();

  if (ScalarTy->isHalfTy())
    return Subtarget.hasVInstructionsF16();
  if (ScalarTy->isFloatTy())
    return Subtarget.hasVInstructionsF32();
  if (ScalarTy->isDoubleTy())
    return Subtarget.hasVInstructionsF64();

  return false;
}

static SDValue getVLOperand(SDValue Op) {
  assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
          Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
         "Unexpected opcode");
  bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
  unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
  const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
      RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
  if (!II)
    return SDValue();
  return Op.getOperand(II->VLOperand + 1 + HasChain);
}

static bool useRVVForFixedLengthVectorVT(MVT VT,
                                         const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
  if (!Subtarget.useRVVForFixedLengthVectors())
    return false;

  // We only support a set of vector types with a consistent maximum fixed size
  // across all supported vector element types to avoid legalization issues.
  // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
  // fixed-length vector type we support is 1024 bytes.
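  // (getFixedSizeInBits is in bits, hence the 1024 * 8 below.)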
  if (VT.getFixedSizeInBits() > 1024 * 8)
    return false;

  unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();

  MVT EltVT = VT.getVectorElementType();

  // Don't use RVV for vectors we cannot scalarize if required.
  switch (EltVT.SimpleTy) {
  // i1 is supported but has different rules.
  default:
    return false;
  case MVT::i1:
    // Masks can only use a single register.
    if (VT.getVectorNumElements() > MinVLen)
      return false;
    MinVLen /= 8;
    break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    break;
  case MVT::i64:
    if (!Subtarget.hasVInstructionsI64())
      return false;
    break;
  case MVT::f16:
    if (!Subtarget.hasVInstructionsF16())
      return false;
    break;
  case MVT::f32:
    if (!Subtarget.hasVInstructionsF32())
      return false;
    break;
  case MVT::f64:
    if (!Subtarget.hasVInstructionsF64())
      return false;
    break;
  }

  // Reject elements larger than ELEN.
  if (EltVT.getSizeInBits() > Subtarget.getELEN())
    return false;

  unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
  // Don't use RVV for types that don't fit.
  if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
    return false;

  // TODO: Perhaps an artificial restriction, but worth having whilst getting
  // the base fixed length RVV support in place.
  if (!VT.isPow2VectorType())
    return false;

  return true;
}

bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
  return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
}

// Return the largest legal scalable vector type that matches VT's element
// type.
static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  // This may be called before legal types are set up.
  assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
          useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
         "Expected legal fixed length vector!");

  unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
  unsigned MaxELen = Subtarget.getELEN();

  MVT EltVT = VT.getVectorElementType();
  switch (EltVT.SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for RVV container");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f16:
  case MVT::f32:
  case MVT::f64: {
    // We prefer to use LMUL=1 for VLEN sized types. Use fractional lmuls for
    // narrower types. The smallest fractional LMUL we support is 8/ELEN. Within
    // each fractional LMUL we support SEW between 8 and LMUL*ELEN.
    // For example, with MinVLen=128, v8i32 gives
    //   NumElts = (8 * 64) / 128 = 4, i.e. an nxv4i32 container.
    unsigned NumElts =
        (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
    NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
    assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
    return MVT::getScalableVectorVT(EltVT, NumElts);
  }
  }
}

static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
                                          Subtarget);
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
  return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
}

// Grow V to consume an entire RVV register.
static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                       const RISCVSubtarget &Subtarget) {
  assert(VT.isScalableVector() &&
         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
}

// Shrink V so it's just big enough to maintain a VT's worth of data.
static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                         const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() &&
         "Expected to convert into a fixed length vector!");
  assert(V.getValueType().isScalableVector() &&
         "Expected a scalable vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
}

/// Return the mask type suitable for masking the provided vector type. This
/// is simply an i1 element type vector of the same (possibly scalable)
/// length.
static MVT getMaskTypeFor(EVT VecVT) {
  assert(VecVT.isVector());
  ElementCount EC = VecVT.getVectorElementCount();
  return MVT::getVectorVT(MVT::i1, EC);
}

/// Creates an all ones mask suitable for masking a vector of type VecVT with
/// vector length VL.
static SDValue getAllOnesMask(MVT VecVT, SDValue VL, SDLoc DL,
                              SelectionDAG &DAG) {
  MVT MaskVT = getMaskTypeFor(VecVT);
  return DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
}

// Gets the two common "VL" operands: an all-ones mask and the vector length.
// VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
// the vector type that it is contained in.
static std::pair<SDValue, SDValue>
getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
                const RISCVSubtarget &Subtarget) {
  assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
  MVT XLenVT = Subtarget.getXLenVT();
  SDValue VL = VecVT.isFixedLengthVector()
                   ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
                   : DAG.getRegister(RISCV::X0, XLenVT);
  SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG);
  return {Mask, VL};
}

// As above but assuming the given type is a scalable vector type.
static std::pair<SDValue, SDValue>
getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
                        const RISCVSubtarget &Subtarget) {
  assert(VecVT.isScalableVector() && "Expecting a scalable vector");
  return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
}

// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very
// little of either is (currently) supported. This can get us into an infinite
// loop where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a
// BUILD_VECTOR as a ..., etc.
// Until either (or both) of these can reliably lower any node, reporting that
// we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
// the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
// which is not desirable.
bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  return false;
}

static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {
  // RISCV FP-to-int conversions saturate to the destination register size, but
  // don't produce 0 for nan. We can use a conversion instruction and fix the
  // nan case with a compare and a select.
  SDValue Src = Op.getOperand(0);

  EVT DstVT = Op.getValueType();
  EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
  unsigned Opc;
  if (SatVT == DstVT)
    Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU;
  else if (DstVT == MVT::i64 && SatVT == MVT::i32)
    Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64;
  else
    return SDValue();
  // FIXME: Support other SatVTs by clamping before or after the conversion.

  SDLoc DL(Op);
  SDValue FpToInt = DAG.getNode(
      Opc, DL, DstVT, Src,
      DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, Subtarget.getXLenVT()));

  SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
  return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
}

// Expand vector FTRUNC, FCEIL, and FFLOOR by converting to the integer domain
// and back, taking care to avoid converting values that are nan or already
// correct.
// TODO: Floor and ceil could be shorter by changing rounding mode, but we
// don't have FRM dependencies modeled yet.
static SDValue lowerFTRUNC_FCEIL_FFLOOR(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isVector() && "Unexpected type");

  SDLoc DL(Op);

  // Freeze the source since we are increasing the number of uses.
  SDValue Src = DAG.getFreeze(Op.getOperand(0));

  // Truncate to integer and convert back to FP.
  MVT IntVT = VT.changeVectorElementTypeToInteger();
  SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Src);
  Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);

  MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());

  if (Op.getOpcode() == ISD::FCEIL) {
    // If the truncated value is greater than or equal to the original value,
    // we've computed the ceil. Otherwise, we went the wrong way and need to
    // increase by 1.
    // FIXME: This should use a masked operation. Handle here or in isel?
    SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Truncated,
                                 DAG.getConstantFP(1.0, DL, VT));
    SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOLT);
    Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
  } else if (Op.getOpcode() == ISD::FFLOOR) {
    // If the truncated value is less than or equal to the original value,
    // we've computed the floor. Otherwise, we went the wrong way and need to
    // decrease by 1.
    // FIXME: This should use a masked operation. Handle here or in isel?
    SDValue Adjust = DAG.getNode(ISD::FSUB, DL, VT, Truncated,
                                 DAG.getConstantFP(1.0, DL, VT));
    SDValue NeedAdjust = DAG.getSetCC(DL, SetccVT, Truncated, Src, ISD::SETOGT);
    Truncated = DAG.getSelect(DL, VT, NeedAdjust, Adjust, Truncated);
  }

  // Restore the original sign so that -0.0 is preserved.
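  // (Why this matters: FP_TO_SINT(-0.25) yields 0 and SINT_TO_FP(0) yields
  // +0.0, so without the FCOPYSIGN below ftrunc(-0.25) would produce +0.0
  // rather than the required -0.0.)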
  Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);

  // Determine the largest integer that can be represented exactly. This and
  // values larger than it don't have any fractional bits so don't need to
  // be converted.
  const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
  unsigned Precision = APFloat::semanticsPrecision(FltSem);
  APFloat MaxVal = APFloat(FltSem);
  MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
                          /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
  SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);

  // If abs(Src) was larger than MaxVal or nan, keep it.
  SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);
  SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
  return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
}

// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
// This mode isn't supported in vector hardware on RISCV. But as long as we
// aren't compiling with trapping math, we can emulate this with
// floor(X + copysign(nextafter(0.5, 0.0), X)).
// FIXME: Could be shorter by changing rounding mode, but we don't have FRM
// dependencies modeled yet.
// FIXME: Use masked operations to avoid final merge.
static SDValue lowerFROUND(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isVector() && "Unexpected type");

  SDLoc DL(Op);

  // Freeze the source since we are increasing the number of uses.
  SDValue Src = DAG.getFreeze(Op.getOperand(0));

  // We do the conversion on the absolute value and fix the sign at the end.
  SDValue Abs = DAG.getNode(ISD::FABS, DL, VT, Src);

  const fltSemantics &FltSem = DAG.EVTToAPFloatSemantics(VT);
  bool Ignored;
  APFloat Point5Pred = APFloat(0.5f);
  Point5Pred.convert(FltSem, APFloat::rmNearestTiesToEven, &Ignored);
  Point5Pred.next(/*nextDown*/ true);

  // Add the adjustment.
  SDValue Adjust = DAG.getNode(ISD::FADD, DL, VT, Abs,
                               DAG.getConstantFP(Point5Pred, DL, VT));

  // Truncate to integer and convert back to fp.
  MVT IntVT = VT.changeVectorElementTypeToInteger();
  SDValue Truncated = DAG.getNode(ISD::FP_TO_SINT, DL, IntVT, Adjust);
  Truncated = DAG.getNode(ISD::SINT_TO_FP, DL, VT, Truncated);

  // Restore the original sign.
  Truncated = DAG.getNode(ISD::FCOPYSIGN, DL, VT, Truncated, Src);

  // Determine the largest integer that can be represented exactly. This and
  // values larger than it don't have any fractional bits so don't need to
  // be converted.
  unsigned Precision = APFloat::semanticsPrecision(FltSem);
  APFloat MaxVal = APFloat(FltSem);
  MaxVal.convertFromAPInt(APInt::getOneBitSet(Precision, Precision - 1),
                          /*IsSigned*/ false, APFloat::rmNearestTiesToEven);
  SDValue MaxValNode = DAG.getConstantFP(MaxVal, DL, VT);

  // If abs(Src) was larger than MaxVal or nan, keep it.
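  // For instance, for f32 (24 bits of precision) MaxVal is 2^23: every float
  // with magnitude >= 2^23 is already integral, and a nan input fails the
  // SETOLT below so the original Src is selected.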
  MVT SetccVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
  SDValue Setcc = DAG.getSetCC(DL, SetccVT, Abs, MaxValNode, ISD::SETOLT);
  return DAG.getSelect(DL, VT, Setcc, Truncated, Src);
}

struct VIDSequence {
  int64_t StepNumerator;
  unsigned StepDenominator;
  int64_t Addend;
};

// Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
// to the (non-zero) step S and start value X. This can then be lowered as the
// RVV sequence (VID * S) + X, for example.
// The step S is represented as an integer numerator divided by a positive
// denominator. Note that the implementation currently only identifies
// sequences in which either the numerator is +/- 1 or the denominator is 1. It
// cannot detect 2/3, for example.
// Note that this method will also match potentially unappealing index
// sequences, like <i32 0, i32 50939494>; however, it is left to the caller to
// determine whether this is worth generating code for.
static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
  unsigned NumElts = Op.getNumOperands();
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
  if (!Op.getValueType().isInteger())
    return None;

  Optional<unsigned> SeqStepDenom;
  Optional<int64_t> SeqStepNum, SeqAddend;
  Optional<std::pair<uint64_t, unsigned>> PrevElt;
  unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
  for (unsigned Idx = 0; Idx < NumElts; Idx++) {
    // Assume undef elements match the sequence; we just have to be careful
    // when interpolating across them.
    if (Op.getOperand(Idx).isUndef())
      continue;
    // The BUILD_VECTOR must be all constants.
    if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
      return None;

    uint64_t Val = Op.getConstantOperandVal(Idx) &
                   maskTrailingOnes<uint64_t>(EltSizeInBits);

    if (PrevElt) {
      // Calculate the step since the last non-undef element, and ensure
      // it's consistent across the entire sequence.
      unsigned IdxDiff = Idx - PrevElt->second;
      int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);

      // A zero value difference means that we're somewhere in the middle
      // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
      // step change before evaluating the sequence.
      if (ValDiff == 0)
        continue;

      int64_t Remainder = ValDiff % IdxDiff;
      // Normalize the step if it's greater than 1.
      if (Remainder != ValDiff) {
        // The difference must cleanly divide the element span.
        if (Remainder != 0)
          return None;
        ValDiff /= IdxDiff;
        IdxDiff = 1;
      }

      if (!SeqStepNum)
        SeqStepNum = ValDiff;
      else if (ValDiff != SeqStepNum)
        return None;

      if (!SeqStepDenom)
        SeqStepDenom = IdxDiff;
      else if (IdxDiff != *SeqStepDenom)
        return None;
    }

    // Record this non-undef element for later.
    if (!PrevElt || PrevElt->first != Val)
      PrevElt = std::make_pair(Val, Idx);
  }

  // We need to have logged a step for this to count as a legal index sequence.
  if (!SeqStepNum || !SeqStepDenom)
    return None;

  // Loop back through the sequence and validate elements we might have skipped
  // while waiting for a valid step. While doing this, log any sequence addend.
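  // As an illustrative example, <i32 1, i32 undef, i32 5, i32 7> logs a step
  // of 2/1 from elements 0 and 2; the loop below then checks the remaining
  // defined elements against Idx * 2 and records the common addend of 1.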
  for (unsigned Idx = 0; Idx < NumElts; Idx++) {
    if (Op.getOperand(Idx).isUndef())
      continue;
    uint64_t Val = Op.getConstantOperandVal(Idx) &
                   maskTrailingOnes<uint64_t>(EltSizeInBits);
    uint64_t ExpectedVal =
        (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
    int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
    if (!SeqAddend)
      SeqAddend = Addend;
    else if (Addend != SeqAddend)
      return None;
  }

  assert(SeqAddend && "Must have an addend if we have a step");

  return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
}

// Match a splatted value (SPLAT_VECTOR/BUILD_VECTOR) of an EXTRACT_VECTOR_ELT
// and lower it as a VRGATHER_VX_VL from the source vector.
static SDValue matchSplatAsGather(SDValue SplatVal, MVT VT, const SDLoc &DL,
                                  SelectionDAG &DAG,
                                  const RISCVSubtarget &Subtarget) {
  if (SplatVal.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return SDValue();
  SDValue Vec = SplatVal.getOperand(0);
  // Only perform this optimization on vectors of the same size for simplicity.
  if (Vec.getValueType() != VT)
    return SDValue();
  SDValue Idx = SplatVal.getOperand(1);
  // The index must be a legal type.
  if (Idx.getValueType() != Subtarget.getXLenVT())
    return SDValue();

  MVT ContainerVT = VT;
  if (VT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);
    Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
  }

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  SDValue Gather = DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, Vec,
                               Idx, Mask, VL);

  if (!VT.isFixedLengthVector())
    return Gather;

  return convertFromScalableVector(VT, Gather, DAG, Subtarget);
}

static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isFixedLengthVector() && "Unexpected vector!");

  MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  MVT XLenVT = Subtarget.getXLenVT();
  unsigned NumElts = Op.getNumOperands();

  if (VT.getVectorElementType() == MVT::i1) {
    if (ISD::isBuildVectorAllZeros(Op.getNode())) {
      SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
    }

    if (ISD::isBuildVectorAllOnes(Op.getNode())) {
      SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
    }

    // Lower constant mask BUILD_VECTORs via an integer vector type, in
    // scalar integer chunks whose bit-width depends on the number of mask
    // bits and XLEN.
    // First, determine the most appropriate scalar integer type to use. This
    // is at most XLenVT, but may be shrunk to a smaller vector element type
    // according to the size of the final vector - use i8 chunks rather than
    // XLenVT if we're producing a v8i1. This results in more consistent
    // codegen across RV32 and RV64.
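    // A sketch of the clamping below (assuming ELEN >= XLEN): a v8i1 packs
    // into a single i8 chunk, giving v1i8 on both RV32 and RV64, while a
    // v128i1 uses XLen-sized chunks: v4i32 on RV32 or v2i64 on RV64.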
    unsigned NumViaIntegerBits =
        std::min(std::max(NumElts, 8u), Subtarget.getXLen());
    NumViaIntegerBits = std::min(NumViaIntegerBits, Subtarget.getELEN());
    if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
      // If we have to use more than one INSERT_VECTOR_ELT then this
      // optimization is likely to increase code size; avoid performing it in
      // such a case. We can use a load from a constant pool in this case.
      if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
        return SDValue();
      // Now we can create our integer vector type. Note that it may be larger
      // than the resulting mask type: v4i1 would use v1i8 as its integer type.
      MVT IntegerViaVecVT =
          MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
                           divideCeil(NumElts, NumViaIntegerBits));

      uint64_t Bits = 0;
      unsigned BitPos = 0, IntegerEltIdx = 0;
      SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);

      for (unsigned I = 0; I < NumElts; I++, BitPos++) {
        // Once we accumulate enough bits to fill our scalar type, insert into
        // our vector and clear our accumulated data.
        if (I != 0 && I % NumViaIntegerBits == 0) {
          if (NumViaIntegerBits <= 32)
            Bits = SignExtend64(Bits, 32);
          SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
          Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
                            Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
          Bits = 0;
          BitPos = 0;
          IntegerEltIdx++;
        }
        SDValue V = Op.getOperand(I);
        bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
        Bits |= ((uint64_t)BitValue << BitPos);
      }

      // Insert the (remaining) scalar value into position in our integer
      // vector type.
      if (NumViaIntegerBits <= 32)
        Bits = SignExtend64(Bits, 32);
      SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
                        DAG.getConstant(IntegerEltIdx, DL, XLenVT));

      if (NumElts < NumViaIntegerBits) {
        // If we're producing a smaller vector than our minimum legal integer
        // type, bitcast to the equivalent (known-legal) mask type, and extract
        // our final mask.
        assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
        Vec = DAG.getBitcast(MVT::v8i1, Vec);
        Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
                          DAG.getConstant(0, DL, XLenVT));
      } else {
        // Else we must have produced an integer type with the same size as the
        // mask type; bitcast for the final result.
        assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
        Vec = DAG.getBitcast(VT, Vec);
      }

      return Vec;
    }

    // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
    // vector type, we have a legal equivalently-sized i8 type, so we can use
    // that.
    MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
    SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);

    SDValue WideVec;
    if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
      // For a splat, perform a scalar truncate before creating the wider
      // vector.
      assert(Splat.getValueType() == XLenVT &&
             "Unexpected type for i1 splat value");
      Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
                          DAG.getConstant(1, DL, XLenVT));
      WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
    } else {
      SmallVector<SDValue, 8> Ops(Op->op_values());
      WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
      SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
      WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
    }

    return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
  }

  if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
    if (auto Gather = matchSplatAsGather(Splat, VT, DL, DAG, Subtarget))
      return Gather;
    unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
                                        : RISCVISD::VMV_V_X_VL;
    Splat =
        DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Splat, VL);
    return convertFromScalableVector(VT, Splat, DAG, Subtarget);
  }

  // Try and match index sequences, which we can lower to the vid instruction
  // with optional modifications. An all-undef vector is matched by
  // getSplatValue, above.
  if (auto SimpleVID = isSimpleVIDSequence(Op)) {
    int64_t StepNumerator = SimpleVID->StepNumerator;
    unsigned StepDenominator = SimpleVID->StepDenominator;
    int64_t Addend = SimpleVID->Addend;

    assert(StepNumerator != 0 && "Invalid step");
    bool Negate = false;
    int64_t SplatStepVal = StepNumerator;
    unsigned StepOpcode = ISD::MUL;
    if (StepNumerator != 1) {
      if (isPowerOf2_64(std::abs(StepNumerator))) {
        Negate = StepNumerator < 0;
        StepOpcode = ISD::SHL;
        SplatStepVal = Log2_64(std::abs(StepNumerator));
      }
    }

    // Only emit VIDs with suitably-small steps/addends. We use imm5 as the
    // threshold since it's the immediate value many RVV instructions accept.
    // There is no vmul.vi instruction so ensure the multiply constant can fit
    // in a single addi instruction.
    if (((StepOpcode == ISD::MUL && isInt<12>(SplatStepVal)) ||
         (StepOpcode == ISD::SHL && isUInt<5>(SplatStepVal))) &&
        isPowerOf2_32(StepDenominator) &&
        (SplatStepVal >= 0 || StepDenominator == 1) && isInt<5>(Addend)) {
      SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
      // Convert right out of the scalable type so we can use standard ISD
      // nodes for the rest of the computation. If we used scalable types with
      // these, we'd lose the fixed-length vector info and generate worse
      // vsetvli code.
      VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
      if ((StepOpcode == ISD::MUL && SplatStepVal != 1) ||
          (StepOpcode == ISD::SHL && SplatStepVal != 0)) {
        SDValue SplatStep = DAG.getSplatBuildVector(
            VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
        VID = DAG.getNode(StepOpcode, DL, VT, VID, SplatStep);
      }
      if (StepDenominator != 1) {
        SDValue SplatStep = DAG.getSplatBuildVector(
            VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
        VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
      }
      if (Addend != 0 || Negate) {
        SDValue SplatAddend = DAG.getSplatBuildVector(
            VT, DL, DAG.getConstant(Addend, DL, XLenVT));
        VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend,
                          VID);
      }
      return VID;
    }
  }

  // Attempt to detect "hidden" splats, which only reveal themselves as splats
  // when re-interpreted as a vector with a larger element type. For example,
  //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
  // could instead be splatted as
  //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
  // TODO: This optimization could also work on non-constant splats, but it
  // would require bit-manipulation instructions to construct the splat value.
  SmallVector<SDValue> Sequence;
  unsigned EltBitSize = VT.getScalarSizeInBits();
  const auto *BV = cast<BuildVectorSDNode>(Op);
  if (VT.isInteger() && EltBitSize < 64 &&
      ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
      BV->getRepeatedSequence(Sequence) &&
      (Sequence.size() * EltBitSize) <= 64) {
    unsigned SeqLen = Sequence.size();
    MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
    MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
    assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
            ViaIntVT == MVT::i64) &&
           "Unexpected sequence type");

    unsigned EltIdx = 0;
    uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
    uint64_t SplatValue = 0;
    // Construct the amalgamated value which can be splatted as this larger
    // vector type.
    for (const auto &SeqV : Sequence) {
      if (!SeqV.isUndef())
        SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
                       << (EltIdx * EltBitSize));
      EltIdx++;
    }

    // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
    if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
      SplatValue = SignExtend64(SplatValue, 32);

    // Since we can't introduce illegal i64 types at this stage, we can only
    // perform an i64 splat on RV32 if it is its own sign-extended value. That
    // way we can use RVV instructions to splat.
    assert((ViaIntVT.bitsLE(XLenVT) ||
            (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
           "Unexpected bitcast sequence");
    if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
      SDValue ViaVL =
          DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
      MVT ViaContainerVT =
          getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
      SDValue Splat =
          DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
                      DAG.getUNDEF(ViaContainerVT),
                      DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
      Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
      return DAG.getBitcast(VT, Splat);
    }
  }

  // Try and optimize BUILD_VECTORs with "dominant values" - these are values
  // which constitute a large proportion of the elements. In such cases we can
  // splat a vector with the dominant element and make up the shortfall with
  // INSERT_VECTOR_ELTs.
  // Note that this includes vectors of 2 elements by association. The
  // upper-most element is the "dominant" one, allowing us to use a splat to
  // "insert" the upper element, and an insert of the lower element at position
  // 0, which improves codegen.
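  // For example, in <i32 0, i32 7, i32 0, i32 0> the value 0 dominates: we
  // can splat 0 and then emit a single INSERT_VECTOR_ELT of 7 at index 1.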
  SDValue DominantValue;
  unsigned MostCommonCount = 0;
  DenseMap<SDValue, unsigned> ValueCounts;
  unsigned NumUndefElts =
      count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });

  // Track the number of scalar loads we know we'd be inserting, estimated as
  // any non-zero floating-point constant. Other kinds of element are either
  // already in registers or are materialized on demand. The threshold at which
  // a vector load is more desirable than several scalar materializations and
  // vector-insertion instructions is not known.
  unsigned NumScalarLoads = 0;

  for (SDValue V : Op->op_values()) {
    if (V.isUndef())
      continue;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
      NumScalarLoads += !CFP->isExactlyValue(+0.0);

    // Is this value dominant? In case of a tie, prefer the highest element as
    // it's cheaper to insert near the beginning of a vector than it is at the
    // end.
    if (++Count >= MostCommonCount) {
      DominantValue = V;
      MostCommonCount = Count;
    }
  }

  assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
  unsigned NumDefElts = NumElts - NumUndefElts;
  unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;

  // Don't perform this optimization when optimizing for size, since
  // materializing elements and inserting them tends to cause code bloat.
  if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
      ((MostCommonCount > DominantValueCountThreshold) ||
       (ValueCounts.size() <= Log2_32(NumDefElts)))) {
    // Start by splatting the most common element.
    SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);

    DenseSet<SDValue> Processed{DominantValue};
    MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
    for (const auto &OpIdx : enumerate(Op->ops())) {
      const SDValue &V = OpIdx.value();
      if (V.isUndef() || !Processed.insert(V).second)
        continue;
      if (ValueCounts[V] == 1) {
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
                          DAG.getConstant(OpIdx.index(), DL, XLenVT));
      } else {
        // Blend in all instances of this value using a VSELECT, using a
        // mask where each bit signals whether that element is the one
        // we're after.
        SmallVector<SDValue> Ops;
        transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
          return DAG.getConstant(V == V1, DL, XLenVT);
        });
        Vec = DAG.getNode(ISD::VSELECT, DL, VT,
                          DAG.getBuildVector(SelMaskTy, DL, Ops),
                          DAG.getSplatBuildVector(VT, DL, V), Vec);
      }
    }

    return Vec;
  }

  return SDValue();
}

static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
                                   SDValue Lo, SDValue Hi, SDValue VL,
                                   SelectionDAG &DAG) {
  if (!Passthru)
    Passthru = DAG.getUNDEF(VT);
  if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
    int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
    int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If Hi constant is all the same sign bit as Lo, lower this as a custom
    // node in order to try and match RVV vector/scalar instructions.
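    // For example (illustrative constants), {Lo = 5, Hi = 0} and
    // {Lo = -1, Hi = -1} both pass the check below: each i64 value is the
    // sign extension of its low 32 bits, so it can be splatted from Lo alone.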
    if ((LoC >> 31) == HiC)
      return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Lo, VL);

    // If VL is VLMAX and the Hi constant is equal to Lo, we could use
    // vmv.v.x whose EEW = 32 to lower it.
    auto *Const = dyn_cast<ConstantSDNode>(VL);
    if (LoC == HiC && Const && Const->isAllOnesValue()) {
      MVT InterVT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
      // TODO: if vl <= min(VLMAX), we can also do this. But we don't have
      // access to the subtarget here.
      auto InterVec = DAG.getNode(
          RISCVISD::VMV_V_X_VL, DL, InterVT, DAG.getUNDEF(InterVT), Lo,
          DAG.getRegister(RISCV::X0, MVT::i32));
      return DAG.getNode(ISD::BITCAST, DL, VT, InterVec);
    }
  }

  // Fall back to a stack store and stride x0 vector load.
  return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Passthru, Lo,
                     Hi, VL);
}

// Called by type legalization to handle splat of i64 on RV32.
// FIXME: We can optimize this when the type has sign or zero bits in one
// of the halves.
static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Passthru,
                                   SDValue Scalar, SDValue VL,
                                   SelectionDAG &DAG) {
  assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
                           DAG.getConstant(1, DL, MVT::i32));
  return splatPartsI64WithVL(DL, VT, Passthru, Lo, Hi, VL, DAG);
}

// This function lowers a splat of the scalar operand Scalar with the vector
// length VL. It ensures the final sequence is type legal, which is useful when
// lowering a splat after type legalization.
static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
                                MVT VT, SDLoc DL, SelectionDAG &DAG,
                                const RISCVSubtarget &Subtarget) {
  bool HasPassthru = Passthru && !Passthru.isUndef();
  if (!HasPassthru && !Passthru)
    Passthru = DAG.getUNDEF(VT);
  if (VT.isFloatingPoint()) {
    // If VL is 1, we could use vfmv.s.f.
    if (isOneConstant(VL))
      return DAG.getNode(RISCVISD::VFMV_S_F_VL, DL, VT, Passthru, Scalar, VL);
    return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Passthru, Scalar, VL);
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Simplest case is that the operand needs to be promoted to XLenVT.
  if (Scalar.getValueType().bitsLE(XLenVT)) {
    // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
    // FIXME: Should we ignore the upper bits in isel instead?
    unsigned ExtOpc =
        isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
    Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
    ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Scalar);
    // If VL is 1 and the scalar value won't benefit from an immediate, we
    // could use vmv.s.x.
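    // E.g. (illustrative constants) splatting 100 with VL=1 takes the
    // vmv.s.x path below since 100 doesn't fit in simm5, whereas splatting 3
    // falls through to VMV_V_X_VL, which isel can select as vmv.v.i.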
    if (isOneConstant(VL) &&
        (!Const || isNullConstant(Scalar) || !isInt<5>(Const->getSExtValue())))
      return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru, Scalar, VL);
    return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Passthru, Scalar, VL);
  }

  assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
         "Unexpected scalar for splat lowering!");

  if (isOneConstant(VL) && isNullConstant(Scalar))
    return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, VT, Passthru,
                       DAG.getConstant(0, DL, XLenVT), VL);

  // Otherwise use the more complicated splatting algorithm.
  return splatSplitI64WithVL(DL, VT, Passthru, Scalar, VL, DAG);
}

static bool isInterleaveShuffle(ArrayRef<int> Mask, MVT VT, bool &SwapSources,
                                const RISCVSubtarget &Subtarget) {
  // We need to be able to widen elements to the next larger integer type.
  if (VT.getScalarSizeInBits() >= Subtarget.getELEN())
    return false;

  int Size = Mask.size();
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  int Srcs[] = {-1, -1};
  for (int i = 0; i != Size; ++i) {
    // Ignore undef elements.
    if (Mask[i] < 0)
      continue;

    // Is this an even or odd element?
    int Pol = i % 2;

    // Ensure we consistently use the same source for this element polarity.
    int Src = Mask[i] / Size;
    if (Srcs[Pol] < 0)
      Srcs[Pol] = Src;
    if (Srcs[Pol] != Src)
      return false;

    // Make sure the element within the source is appropriate for this element
    // in the destination.
    int Elt = Mask[i] % Size;
    if (Elt != i / 2)
      return false;
  }

  // We need to find a source for each polarity and they can't be the same.
  if (Srcs[0] < 0 || Srcs[1] < 0 || Srcs[0] == Srcs[1])
    return false;

  // Swap the sources if the second source was in the even polarity.
  SwapSources = Srcs[0] > Srcs[1];

  return true;
}

/// Match shuffles that concatenate two vectors, rotate the concatenation,
/// and then extract the original number of elements from the rotated result.
/// This is equivalent to vector.splice or X86's PALIGNR instruction. The
/// returned rotation amount is for a rotate right, where elements move from
/// higher elements to lower elements. \p LoSrc indicates the first source
/// vector of the rotate or -1 for undef. \p HiSrc indicates the second vector
/// of the rotate or -1 for undef. At least one of \p LoSrc and \p HiSrc will
/// be 0 or 1 if a rotation is found.
///
/// NOTE: We talk about rotate to the right which matches how bit shift and
/// rotate instructions are described where LSBs are on the right, but LLVM IR
/// and the table below write vectors with the lowest elements on the left.
static int isElementRotate(int &LoSrc, int &HiSrc, ArrayRef<int> Mask) {
  int Size = Mask.size();

  // We need to detect various ways of spelling a rotation:
  //   [11, 12, 13, 14, 15,  0,  1,  2]
  //   [-1, 12, 13, 14, -1, -1,  1, -1]
  //   [-1, -1, -1, -1, -1, -1,  1,  2]
  //   [ 3,  4,  5,  6,  7,  8,  9, 10]
  //   [-1,  4,  5,  6, -1, -1,  9, -1]
  //   [-1,  4,  5,  6, -1, -1, -1, -1]
  int Rotation = 0;
  LoSrc = -1;
  HiSrc = -1;
  for (int i = 0; i != Size; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;

    // Determine where a rotate vector would have started.
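    // E.g. with Size == 8, seeing M == 11 at i == 0 gives
    // StartIdx == 0 - (11 % 8) == -3: a candidate rotation of 3 sourced from
    // the high input vector.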
    int StartIdx = i - (M % Size);
    // The identity rotation isn't interesting, stop.
    if (StartIdx == 0)
      return -1;

    // If we found the tail of a vector the rotation must be the missing
    // front. If we found the head of a vector, it must be how much of the
    // head.
    int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;

    if (Rotation == 0)
      Rotation = CandidateRotation;
    else if (Rotation != CandidateRotation)
      // The rotations don't match, so we can't match this mask.
      return -1;

    // Compute which value this mask is pointing at.
    int MaskSrc = M < Size ? 0 : 1;

    // Compute which of the two target values this index should be assigned to.
    // This reflects whether the high elements are remaining or the low
    // elements are remaining.
    int &TargetSrc = StartIdx < 0 ? HiSrc : LoSrc;

    // Either set up this value if we've not encountered it before, or check
    // that it remains consistent.
    if (TargetSrc < 0)
      TargetSrc = MaskSrc;
    else if (TargetSrc != MaskSrc)
      // This may be a rotation, but it pulls from the inputs in some
      // unsupported interleaving.
      return -1;
  }

  // Check that we successfully analyzed the mask, and normalize the results.
  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((LoSrc >= 0 || HiSrc >= 0) &&
         "Failed to find a rotated input vector!");

  return Rotation;
}

static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();
  MVT VT = Op.getSimpleValueType();
  unsigned NumElts = VT.getVectorNumElements();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());

  MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);

  SDValue TrueMask, VL;
  std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  if (SVN->isSplat()) {
    const int Lane = SVN->getSplatIndex();
    if (Lane >= 0) {
      MVT SVT = VT.getVectorElementType();

      // Turn splatted vector load into a strided load with an X0 stride.
      SDValue V = V1;
      // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
      // with undef.
      // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
      int Offset = Lane;
      if (V.getOpcode() == ISD::CONCAT_VECTORS) {
        int OpElements =
            V.getOperand(0).getSimpleValueType().getVectorNumElements();
        V = V.getOperand(Offset / OpElements);
        Offset %= OpElements;
      }

      // We need to ensure the load isn't atomic or volatile.
      if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
        auto *Ld = cast<LoadSDNode>(V);
        Offset *= SVT.getStoreSize();
        SDValue NewAddr = DAG.getMemBasePlusOffset(
            Ld->getBasePtr(), TypeSize::Fixed(Offset), DL);

        // If this is SEW=64 on RV32, use a strided load with a stride of x0.
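        // (Sketch of why: on RV32 an i64 element can't round-trip through a
        // GPR, so rather than a scalar load + splat we re-load the element
        // with a zero-strided vlse and broadcast it directly.)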
        if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
          SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
          SDValue IntID =
              DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
          SDValue Ops[] = {Ld->getChain(),
                           IntID,
                           DAG.getUNDEF(ContainerVT),
                           NewAddr,
                           DAG.getRegister(RISCV::X0, XLenVT),
                           VL};
          SDValue NewLoad = DAG.getMemIntrinsicNode(
              ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
              DAG.getMachineFunction().getMachineMemOperand(
                  Ld->getMemOperand(), Offset, SVT.getStoreSize()));
          DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
          return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
        }

        // Otherwise use a scalar load and splat. This will give the best
        // opportunity to fold a splat into the operation. ISel can turn it
        // into the x0 strided load if we aren't able to fold away the select.
        if (SVT.isFloatingPoint())
          V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                          Ld->getPointerInfo().getWithOffset(Offset),
                          Ld->getOriginalAlign(),
                          Ld->getMemOperand()->getFlags());
        else
          V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(),
                             NewAddr,
                             Ld->getPointerInfo().getWithOffset(Offset), SVT,
                             Ld->getOriginalAlign(),
                             Ld->getMemOperand()->getFlags());
        DAG.makeEquivalentMemoryOrdering(Ld, V);

        unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
                                            : RISCVISD::VMV_V_X_VL;
        SDValue Splat =
            DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), V, VL);
        return convertFromScalableVector(VT, Splat, DAG, Subtarget);
      }

      V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
      assert(Lane < (int)NumElts && "Unexpected lane!");
      SDValue Gather =
          DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
                      DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
      return convertFromScalableVector(VT, Gather, DAG, Subtarget);
    }
  }

  ArrayRef<int> Mask = SVN->getMask();

  // Lower rotations to a SLIDEDOWN and a SLIDEUP. One of the source vectors
  // may be undef which can be handled with a single SLIDEDOWN/UP.
  int LoSrc, HiSrc;
  int Rotation = isElementRotate(LoSrc, HiSrc, Mask);
  if (Rotation > 0) {
    SDValue LoV, HiV;
    if (LoSrc >= 0) {
      LoV = LoSrc == 0 ? V1 : V2;
      LoV = convertToScalableVector(ContainerVT, LoV, DAG, Subtarget);
    }
    if (HiSrc >= 0) {
      HiV = HiSrc == 0 ? V1 : V2;
      HiV = convertToScalableVector(ContainerVT, HiV, DAG, Subtarget);
    }

    // We found a rotation. We need to slide HiV down by Rotation. Then we need
    // to slide LoV up by (NumElts - Rotation).
    unsigned InvRotate = NumElts - Rotation;

    SDValue Res = DAG.getUNDEF(ContainerVT);
    if (HiV) {
      // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
      // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
      // causes multiple vsetvlis in some test cases such as lowering
      // reduce.mul
      SDValue DownVL = VL;
      if (LoV)
        DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
      Res =
          DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, Res, HiV,
                      DAG.getConstant(Rotation, DL, XLenVT), TrueMask, DownVL);
    }
    if (LoV)
      Res = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Res, LoV,
                        DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL);

    return convertFromScalableVector(VT, Res, DAG, Subtarget);
  }

  // Detect an interleave shuffle and lower to
  // (vmaccu.vx (vwaddu.vx lohalf(V1), lohalf(V2)), lohalf(V2), (2^eltbits - 1))
  bool SwapSources;
  if (isInterleaveShuffle(Mask, VT, SwapSources, Subtarget)) {
    // Swap sources if needed.
    if (SwapSources)
      std::swap(V1, V2);

    // Extract the lower half of the vectors.
    MVT HalfVT = VT.getHalfNumVectorElementsVT();
    V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
                     DAG.getConstant(0, DL, XLenVT));
    V2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V2,
                     DAG.getConstant(0, DL, XLenVT));

    // Double the element width and halve the number of elements in an int
    // type.
    unsigned EltBits = VT.getScalarSizeInBits();
    MVT WideIntEltVT = MVT::getIntegerVT(EltBits * 2);
    MVT WideIntVT =
        MVT::getVectorVT(WideIntEltVT, VT.getVectorNumElements() / 2);
    // Convert this to a scalable vector. We need to base this on the
    // destination size to ensure there's always a type with a smaller LMUL.
    MVT WideIntContainerVT =
        getContainerForFixedLengthVector(DAG, WideIntVT, Subtarget);

    // Convert sources to scalable vectors with the same element count as the
    // larger type.
    MVT HalfContainerVT = MVT::getVectorVT(
        VT.getVectorElementType(), WideIntContainerVT.getVectorElementCount());
    V1 = convertToScalableVector(HalfContainerVT, V1, DAG, Subtarget);
    V2 = convertToScalableVector(HalfContainerVT, V2, DAG, Subtarget);

    // Cast sources to integer.
    MVT IntEltVT = MVT::getIntegerVT(EltBits);
    MVT IntHalfVT =
        MVT::getVectorVT(IntEltVT, HalfContainerVT.getVectorElementCount());
    V1 = DAG.getBitcast(IntHalfVT, V1);
    V2 = DAG.getBitcast(IntHalfVT, V2);

    // Freeze V2 since we use it twice and we need to be sure that the add and
    // multiply see the same value.
    V2 = DAG.getFreeze(V2);

    // Recreate TrueMask using the widened type's element count.
    TrueMask = getAllOnesMask(HalfContainerVT, VL, DL, DAG);

    // Widen V1 and V2 with 0s and add one copy of V2 to V1.
    SDValue Add = DAG.getNode(RISCVISD::VWADDU_VL, DL, WideIntContainerVT, V1,
                              V2, TrueMask, VL);
    // Create 2^eltbits - 1 copies of V2 by multiplying by the largest integer.
    SDValue Multiplier = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntHalfVT,
                                     DAG.getUNDEF(IntHalfVT),
                                     DAG.getAllOnesConstant(DL, XLenVT));
    SDValue WidenMul = DAG.getNode(RISCVISD::VWMULU_VL, DL, WideIntContainerVT,
                                   V2, Multiplier, TrueMask, VL);
    // Add the new copies to our previous addition giving us 2^eltbits copies
    // of V2. This is equivalent to shifting V2 left by eltbits. This should
    // combine with the vwmulu.vv above to form vwmaccu.vv.
    Add = DAG.getNode(RISCVISD::ADD_VL, DL, WideIntContainerVT, Add, WidenMul,
                      TrueMask, VL);
    // Cast back to ContainerVT. We need to re-create a new ContainerVT in case
    // WideIntContainerVT is a larger fractional LMUL than implied by the fixed
    // vector VT.
    ContainerVT =
        MVT::getVectorVT(VT.getVectorElementType(),
                         WideIntContainerVT.getVectorElementCount() * 2);
    Add = DAG.getBitcast(ContainerVT, Add);
    return convertFromScalableVector(VT, Add, DAG, Subtarget);
  }

  // Detect shuffles which can be re-expressed as vector selects; these are
  // shuffles in which each element in the destination is taken from an element
  // at the corresponding index in either source vector.
  bool IsSelect = all_of(enumerate(Mask), [&](const auto &MaskIdx) {
    int MaskIndex = MaskIdx.value();
    return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
  });

  assert(!V1.isUndef() && "Unexpected shuffle canonicalization");

  SmallVector<SDValue> MaskVals;
  // As a backup, shuffles can be lowered via a vrgather instruction, possibly
  // merged with a second vrgather.
  SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;

  // By default we preserve the original operand order, and use a mask to
  // select LHS as true and RHS as false. However, since RVV vector selects may
  // feature splats but only on the LHS, we may choose to invert our mask and
  // instead select between RHS and LHS.
  bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
  bool InvertMask = IsSelect == SwapOps;

  // Keep track of which non-undef indices are used by each LHS/RHS shuffle
  // half.
  DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;

  // Now construct the mask that will be used by the vselect or blended
  // vrgather operation. For vrgathers, construct the appropriate indices into
  // each vector.
  for (int MaskIndex : Mask) {
    bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
    MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
    if (!IsSelect) {
      bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
      GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
                                     ? DAG.getConstant(MaskIndex, DL, XLenVT)
                                     : DAG.getUNDEF(XLenVT));
      GatherIndicesRHS.push_back(
          IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
                            : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
      if (IsLHSOrUndefIndex && MaskIndex >= 0)
        ++LHSIndexCounts[MaskIndex];
      if (!IsLHSOrUndefIndex)
        ++RHSIndexCounts[MaskIndex - NumElts];
    }
  }

  if (SwapOps) {
    std::swap(V1, V2);
    std::swap(GatherIndicesLHS, GatherIndicesRHS);
  }

  assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
  MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
  SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);

  if (IsSelect)
    return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);

  if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
    // On such a large vector we're unable to use i8 as the index type.
    // FIXME: We could promote the index to i16 and use vrgatherei16, but that
    // may involve vector splitting if we're already at LMUL=8, or our
    // user-supplied maximum fixed-length LMUL.
    return SDValue();
  }

  unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
  unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
  MVT IndexVT = VT.changeTypeToInteger();
  // Since we can't introduce illegal index types at this stage, use i16 and
  // vrgatherei16 if the corresponding index type for plain vrgather is greater
  // than XLenVT.
  if (IndexVT.getScalarType().bitsGT(XLenVT)) {
    GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
    IndexVT = IndexVT.changeVectorElementType(MVT::i16);
  }

  MVT IndexContainerVT =
      ContainerVT.changeVectorElementType(IndexVT.getScalarType());

  SDValue Gather;
  // TODO: This doesn't trigger for i64 vectors on RV32, since there we
  // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
  if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
    Gather = lowerScalarSplat(SDValue(), SplatValue, VL, ContainerVT, DL, DAG,
                              Subtarget);
  } else {
    V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
    // If only one index is used, we can use a "splat" vrgather.
    // TODO: We can splat the most-common index and fix-up any stragglers, if
    // that's beneficial.
    if (LHSIndexCounts.size() == 1) {
      int SplatIndex = LHSIndexCounts.begin()->getFirst();
      Gather =
          DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
                      DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
    } else {
      SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
      LHSIndices =
          convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);

      Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
                           TrueMask, VL);
    }
  }

  // If a second vector operand is used by this shuffle, blend it in with an
  // additional vrgather.
  if (!V2.isUndef()) {
    V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
    // If only one index is used, we can use a "splat" vrgather.
    // TODO: We can splat the most-common index and fix-up any stragglers, if
    // that's beneficial.
    if (RHSIndexCounts.size() == 1) {
      int SplatIndex = RHSIndexCounts.begin()->getFirst();
      V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
    } else {
      SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
      RHSIndices =
          convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
      V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
                       VL);
    }

    MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
    SelectMask =
        convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);

    Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
                         Gather, VL);
  }

  return convertFromScalableVector(VT, Gather, DAG, Subtarget);
}

bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
  // Support splats for any type. These should type legalize well.
  if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
    return true;

  // Only support legal VTs for other shuffles for now.
  if (!isTypeLegal(VT))
    return false;

  MVT SVT = VT.getSimpleVT();

  bool SwapSources;
  int LoSrc, HiSrc;
  return (isElementRotate(LoSrc, HiSrc, M) > 0) ||
         isInterleaveShuffle(M, SVT, SwapSources, Subtarget);
}

// Lower CTLZ_ZERO_UNDEF or CTTZ_ZERO_UNDEF by converting to FP and extracting
// the exponent.
static SDValue lowerCTLZ_CTTZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  unsigned EltSize = VT.getScalarSizeInBits();
  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // We need an FP type that can represent the value.
  // TODO: Use f16 for i8 when possible?
  MVT FloatEltVT = EltSize == 32 ? MVT::f64 : MVT::f32;
  MVT FloatVT = MVT::getVectorVT(FloatEltVT, VT.getVectorElementCount());

  // Legal types should have been checked in the RISCVTargetLowering
  // constructor.
  // TODO: Splitting may make sense in some cases.
  assert(DAG.getTargetLoweringInfo().isTypeLegal(FloatVT) &&
         "Expected legal float type!");

  // For CTTZ_ZERO_UNDEF, we need to extract the lowest set bit using X & -X.
  // The trailing zero count is equal to log2 of this single bit value.
  if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF) {
    SDValue Neg =
        DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
    Src = DAG.getNode(ISD::AND, DL, VT, Src, Neg);
  }

  // We have a legal FP type, convert to it.
  SDValue FloatVal = DAG.getNode(ISD::UINT_TO_FP, DL, FloatVT, Src);
  // Bitcast to integer and shift the exponent to the LSB.
  EVT IntVT = FloatVT.changeVectorElementTypeToInteger();
  SDValue Bitcast = DAG.getBitcast(IntVT, FloatVal);
  unsigned ShiftAmt = FloatEltVT == MVT::f64 ? 52 : 23;
  SDValue Shift = DAG.getNode(ISD::SRL, DL, IntVT, Bitcast,
                              DAG.getConstant(ShiftAmt, DL, IntVT));
  // Truncate back to original type to allow vnsrl.
  SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, Shift);
  // The exponent contains log2 of the value in biased form.
  unsigned ExponentBias = FloatEltVT == MVT::f64 ? 1023 : 127;

  // For trailing zeros, we just need to subtract the bias.
  if (Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF)
    return DAG.getNode(ISD::SUB, DL, VT, Trunc,
                       DAG.getConstant(ExponentBias, DL, VT));

  // For leading zeros, we need to remove the bias and convert from log2 to
  // leading zeros. We can do this by subtracting from (Bias + (EltSize - 1)).
  unsigned Adjust = ExponentBias + (EltSize - 1);
  return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(Adjust, DL, VT), Trunc);
}

// While RVV has alignment restrictions, we should always be able to load as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
// the load is already correctly-aligned, it returns SDValue().
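// For example, an unaligned v4i32 load is re-expressed as a v16i8 load
// (assuming, as asserted below, that the byte-typed equivalent is valid) and
// the result is bitcast back to v4i32.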
SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Load = cast<LoadSDNode>(Op);
  assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");

  if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                     Load->getMemoryVT(),
                                     *Load->getMemOperand()))
    return SDValue();

  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  unsigned EltSizeBits = VT.getScalarSizeInBits();
  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV load type");
  MVT NewVT = MVT::getVectorVT(
      MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
  assert(NewVT.isValid() &&
         "Expecting equally-sized RVV vector types to be legal");
  SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
                          Load->getPointerInfo(), Load->getOriginalAlign(),
                          Load->getMemOperand()->getFlags());
  return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
}

// While RVV has alignment restrictions, we should always be able to store as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
// returns SDValue() if the store is already correctly aligned.
SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
                                                     SelectionDAG &DAG) const {
  auto *Store = cast<StoreSDNode>(Op);
  assert(Store && Store->getValue().getValueType().isVector() &&
         "Expected vector store");

  if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                     Store->getMemoryVT(),
                                     *Store->getMemOperand()))
    return SDValue();

  SDLoc DL(Op);
  SDValue StoredVal = Store->getValue();
  MVT VT = StoredVal.getSimpleValueType();
  unsigned EltSizeBits = VT.getScalarSizeInBits();
  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV store type");
  MVT NewVT = MVT::getVectorVT(
      MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
  assert(NewVT.isValid() &&
         "Expecting equally-sized RVV vector types to be legal");
  StoredVal = DAG.getBitcast(NewVT, StoredVal);
  return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
                      Store->getPointerInfo(), Store->getOriginalAlign(),
                      Store->getMemOperand()->getFlags());
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::JumpTable:
    return lowerJumpTable(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::BRCOND:
    return lowerBRCOND(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
lowerShiftRightParts(Op, DAG, false); 2965 case ISD::BITCAST: { 2966 SDLoc DL(Op); 2967 EVT VT = Op.getValueType(); 2968 SDValue Op0 = Op.getOperand(0); 2969 EVT Op0VT = Op0.getValueType(); 2970 MVT XLenVT = Subtarget.getXLenVT(); 2971 if (VT.isFixedLengthVector()) { 2972 // We can handle fixed length vector bitcasts with a simple replacement 2973 // in isel. 2974 if (Op0VT.isFixedLengthVector()) 2975 return Op; 2976 // When bitcasting from scalar to fixed-length vector, insert the scalar 2977 // into a one-element vector of the result type, and perform a vector 2978 // bitcast. 2979 if (!Op0VT.isVector()) { 2980 EVT BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1); 2981 if (!isTypeLegal(BVT)) 2982 return SDValue(); 2983 return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT, 2984 DAG.getUNDEF(BVT), Op0, 2985 DAG.getConstant(0, DL, XLenVT))); 2986 } 2987 return SDValue(); 2988 } 2989 // Custom-legalize bitcasts from fixed-length vector types to scalar types 2990 // thus: bitcast the vector to a one-element vector type whose element type 2991 // is the same as the result type, and extract the first element. 2992 if (!VT.isVector() && Op0VT.isFixedLengthVector()) { 2993 EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1); 2994 if (!isTypeLegal(BVT)) 2995 return SDValue(); 2996 SDValue BVec = DAG.getBitcast(BVT, Op0); 2997 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec, 2998 DAG.getConstant(0, DL, XLenVT)); 2999 } 3000 if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) { 3001 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0); 3002 SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0); 3003 return FPConv; 3004 } 3005 if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() && 3006 Subtarget.hasStdExtF()) { 3007 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0); 3008 SDValue FPConv = 3009 DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0); 3010 return FPConv; 3011 } 3012 return SDValue(); 3013 } 3014 case ISD::INTRINSIC_WO_CHAIN: 3015 return LowerINTRINSIC_WO_CHAIN(Op, DAG); 3016 case ISD::INTRINSIC_W_CHAIN: 3017 return LowerINTRINSIC_W_CHAIN(Op, DAG); 3018 case ISD::INTRINSIC_VOID: 3019 return LowerINTRINSIC_VOID(Op, DAG); 3020 case ISD::BSWAP: 3021 case ISD::BITREVERSE: { 3022 MVT VT = Op.getSimpleValueType(); 3023 SDLoc DL(Op); 3024 if (Subtarget.hasStdExtZbp()) { 3025 // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combinining. 3026 // Start with the maximum immediate value which is the bitwidth - 1. 3027 unsigned Imm = VT.getSizeInBits() - 1; 3028 // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits. 3029 if (Op.getOpcode() == ISD::BSWAP) 3030 Imm &= ~0x7U; 3031 return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0), 3032 DAG.getConstant(Imm, DL, VT)); 3033 } 3034 assert(Subtarget.hasStdExtZbkb() && "Unexpected custom legalization"); 3035 assert(Op.getOpcode() == ISD::BITREVERSE && "Unexpected opcode"); 3036 // Expand bitreverse to a bswap(rev8) followed by brev8. 3037 SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, Op.getOperand(0)); 3038 // We use the Zbp grevi encoding for rev.b/brev8 which will be recognized 3039 // as brev8 by an isel pattern. 
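    // For example (illustrative): bitreversing the i32 value 0x00000001 first
    // byte-swaps to 0x01000000, then brev8 reverses the bits within each byte
    // to produce 0x80000000.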
    return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
                       DAG.getConstant(7, DL, VT));
  }
  case ISD::FSHL:
  case ISD::FSHR: {
    MVT VT = Op.getSimpleValueType();
    assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
    SDLoc DL(Op);
    // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly to prevent
    // accidentally setting the extra bit.
    unsigned ShAmtWidth = Subtarget.getXLen() - 1;
    SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
                                DAG.getConstant(ShAmtWidth, DL, VT));
    // fshl and fshr concatenate their operands in the same order. fsl and fsr
    // instructions use different orders. fshl will return its first operand
    // for a shift of zero, fshr will return its second operand. fsl and fsr
    // both return rs1 so the ISD nodes need to have different operand orders.
    // Shift amount is in rs2.
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    unsigned Opc = RISCVISD::FSL;
    if (Op.getOpcode() == ISD::FSHR) {
      std::swap(Op0, Op1);
      Opc = RISCVISD::FSR;
    }
    return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
  }
  case ISD::TRUNCATE:
    // Only custom-lower vector truncates
    if (!Op.getSimpleValueType().isVector())
      return Op;
    return lowerVectorTruncLike(Op, DAG);
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:
    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
    return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
  case ISD::SIGN_EXTEND:
    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
    return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
  case ISD::SPLAT_VECTOR_PARTS:
    return lowerSPLAT_VECTOR_PARTS(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::VSCALE: {
    MVT VT = Op.getSimpleValueType();
    SDLoc DL(Op);
    SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
    // We define our scalable vector types for lmul=1 to use a 64 bit known
    // minimum size. e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
    // vscale as VLENB / 8.
    static_assert(RISCV::RVVBitsPerBlock == 64, "Unexpected bits per block!");
    if (Subtarget.getMinVLen() < RISCV::RVVBitsPerBlock)
      report_fatal_error("Support for VLEN==32 is incomplete.");
    if (isa<ConstantSDNode>(Op.getOperand(0))) {
      // We assume VLENB is a multiple of 8. We manually choose the best shift
      // here because SimplifyDemandedBits isn't always able to simplify it.
      uint64_t Val = Op.getConstantOperandVal(0);
      if (isPowerOf2_64(Val)) {
        uint64_t Log2 = Log2_64(Val);
        if (Log2 < 3)
          return DAG.getNode(ISD::SRL, DL, VT, VLENB,
                             DAG.getConstant(3 - Log2, DL, VT));
        if (Log2 > 3)
          return DAG.getNode(ISD::SHL, DL, VT, VLENB,
                             DAG.getConstant(Log2 - 3, DL, VT));
        return VLENB;
      }
      // If the multiplier is a multiple of 8, scale it down to avoid needing
      // to shift the VLENB value.
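      // For example (illustrative): (vscale * 24) becomes (VLENB * 3), since
      // vscale = VLENB / 8 and 24 / 8 = 3, so no shift of VLENB is needed.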
      if ((Val % 8) == 0)
        return DAG.getNode(ISD::MUL, DL, VT, VLENB,
                           DAG.getConstant(Val / 8, DL, VT));
    }

    SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
                                 DAG.getConstant(3, DL, VT));
    return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
  }
  case ISD::FPOWI: {
    // Custom promote f16 powi with illegal i32 integer type on RV64. Once
    // promoted this will be legalized into a libcall by LegalizeIntegerTypes.
    if (Op.getValueType() == MVT::f16 && Subtarget.is64Bit() &&
        Op.getOperand(1).getValueType() == MVT::i32) {
      SDLoc DL(Op);
      SDValue Op0 = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Op.getOperand(0));
      SDValue Powi =
          DAG.getNode(ISD::FPOWI, DL, MVT::f32, Op0, Op.getOperand(1));
      return DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, Powi,
                         DAG.getIntPtrConstant(0, DL));
    }
    return SDValue();
  }
  case ISD::FP_EXTEND:
  case ISD::FP_ROUND:
    if (!Op.getValueType().isVector())
      return Op;
    return lowerVectorFPExtendOrRoundLike(Op, DAG);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP: {
    // RVV can only do fp<->int conversions to types half/double the size as
    // the source. We custom-lower any conversions that do two hops into
    // sequences.
    MVT VT = Op.getSimpleValueType();
    if (!VT.isVector())
      return Op;
    SDLoc DL(Op);
    SDValue Src = Op.getOperand(0);
    MVT EltVT = VT.getVectorElementType();
    MVT SrcVT = Src.getSimpleValueType();
    MVT SrcEltVT = SrcVT.getVectorElementType();
    unsigned EltSize = EltVT.getSizeInBits();
    unsigned SrcEltSize = SrcEltVT.getSizeInBits();
    assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
           "Unexpected vector element types");

    bool IsInt2FP = SrcEltVT.isInteger();
    // Widening conversions
    if (EltSize > (2 * SrcEltSize)) {
      if (IsInt2FP) {
        // Do a regular integer sign/zero extension then convert to float.
        MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize),
                                      VT.getVectorElementCount());
        unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
                                 ? ISD::ZERO_EXTEND
                                 : ISD::SIGN_EXTEND;
        SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
        return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
      }
      // FP2Int
      assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
      // Do one doubling fp_extend then complete the operation by converting
      // to int.
      MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
      SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
      return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
    }

    // Narrowing conversions
    if (SrcEltSize > (2 * EltSize)) {
      if (IsInt2FP) {
        // One narrowing int_to_fp, then an fp_round.
        assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
        MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
        SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
        return DAG.getFPExtendOrRound(Int2FP, DL, VT);
      }
      // FP2Int
      // One narrowing fp_to_int, then truncate the integer. If the float isn't
      // representable by the integer, the result is poison.
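      // For example (illustrative): fp_to_sint from v4f64 to v4i8 first
      // converts to v4i32 (a legal halving conversion), then truncates the
      // v4i32 result to v4i8, which itself lowers as a series of vnsrl steps.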
      MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
                                    VT.getVectorElementCount());
      SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
      return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
    }

    // Scalable vectors can exit here. Patterns will handle equally-sized
    // conversions and halving/doubling ones.
    if (!VT.isFixedLengthVector())
      return Op;

    // For fixed-length vectors we lower to a custom "VL" node.
    unsigned RVVOpc = 0;
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Impossible opcode");
    case ISD::FP_TO_SINT:
      RVVOpc = RISCVISD::FP_TO_SINT_VL;
      break;
    case ISD::FP_TO_UINT:
      RVVOpc = RISCVISD::FP_TO_UINT_VL;
      break;
    case ISD::SINT_TO_FP:
      RVVOpc = RISCVISD::SINT_TO_FP_VL;
      break;
    case ISD::UINT_TO_FP:
      RVVOpc = RISCVISD::UINT_TO_FP_VL;
      break;
    }

    MVT ContainerVT, SrcContainerVT;
    // Derive the reference container type from the larger vector type.
    if (SrcEltSize > EltSize) {
      SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
      ContainerVT =
          SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
    } else {
      ContainerVT = getContainerForFixedLengthVector(VT);
      SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
    }

    SDValue Mask, VL;
    std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

    Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
    Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
    return convertFromScalableVector(VT, Src, DAG, Subtarget);
  }
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    return lowerFP_TO_INT_SAT(Op, DAG, Subtarget);
  case ISD::FTRUNC:
  case ISD::FCEIL:
  case ISD::FFLOOR:
    return lowerFTRUNC_FCEIL_FFLOOR(Op, DAG);
  case ISD::FROUND:
    return lowerFROUND(Op, DAG);
  case ISD::VECREDUCE_ADD:
  case ISD::VECREDUCE_UMAX:
  case ISD::VECREDUCE_SMAX:
  case ISD::VECREDUCE_UMIN:
  case ISD::VECREDUCE_SMIN:
    return lowerVECREDUCE(Op, DAG);
  case ISD::VECREDUCE_AND:
  case ISD::VECREDUCE_OR:
  case ISD::VECREDUCE_XOR:
    if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
    return lowerVECREDUCE(Op, DAG);
  case ISD::VECREDUCE_FADD:
  case ISD::VECREDUCE_SEQ_FADD:
  case ISD::VECREDUCE_FMIN:
  case ISD::VECREDUCE_FMAX:
    return lowerFPVECREDUCE(Op, DAG);
  case ISD::VP_REDUCE_ADD:
  case ISD::VP_REDUCE_UMAX:
  case ISD::VP_REDUCE_SMAX:
  case ISD::VP_REDUCE_UMIN:
  case ISD::VP_REDUCE_SMIN:
  case ISD::VP_REDUCE_FADD:
  case ISD::VP_REDUCE_SEQ_FADD:
  case ISD::VP_REDUCE_FMIN:
  case ISD::VP_REDUCE_FMAX:
    return lowerVPREDUCE(Op, DAG);
  case ISD::VP_REDUCE_AND:
  case ISD::VP_REDUCE_OR:
  case ISD::VP_REDUCE_XOR:
    if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
    return lowerVPREDUCE(Op, DAG);
  case ISD::INSERT_SUBVECTOR:
    return lowerINSERT_SUBVECTOR(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR:
    return lowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::STEP_VECTOR:
    return lowerSTEP_VECTOR(Op, DAG);
  case ISD::VECTOR_REVERSE:
    return lowerVECTOR_REVERSE(Op, DAG);
  case ISD::VECTOR_SPLICE:
    return lowerVECTOR_SPLICE(Op, DAG);
  case ISD::BUILD_VECTOR:
    return lowerBUILD_VECTOR(Op, DAG, Subtarget);
  case ISD::SPLAT_VECTOR:
    if (Op.getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskSplat(Op, DAG);
    return SDValue();
  case ISD::VECTOR_SHUFFLE:
    return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
  case ISD::CONCAT_VECTORS: {
    // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
    // better than going through the stack, as the default expansion does.
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    unsigned NumOpElts =
        Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
    SDValue Vec = DAG.getUNDEF(VT);
    for (const auto &OpIdx : enumerate(Op->ops())) {
      SDValue SubVec = OpIdx.value();
      // Don't insert undef subvectors.
      if (SubVec.isUndef())
        continue;
      Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, SubVec,
                        DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
    }
    return Vec;
  }
  case ISD::LOAD:
    if (auto V = expandUnalignedRVVLoad(Op, DAG))
      return V;
    if (Op.getValueType().isFixedLengthVector())
      return lowerFixedLengthVectorLoadToRVV(Op, DAG);
    return Op;
  case ISD::STORE:
    if (auto V = expandUnalignedRVVStore(Op, DAG))
      return V;
    if (Op.getOperand(1).getValueType().isFixedLengthVector())
      return lowerFixedLengthVectorStoreToRVV(Op, DAG);
    return Op;
  case ISD::MLOAD:
  case ISD::VP_LOAD:
    return lowerMaskedLoad(Op, DAG);
  case ISD::MSTORE:
  case ISD::VP_STORE:
    return lowerMaskedStore(Op, DAG);
  case ISD::SETCC:
    return lowerFixedLengthVectorSetccToRVV(Op, DAG);
  case ISD::ADD:
    return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
  case ISD::SUB:
    return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
  case ISD::MUL:
    return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
  case ISD::MULHS:
    return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
  case ISD::MULHU:
    return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
  case ISD::AND:
    return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
                                              RISCVISD::AND_VL);
  case ISD::OR:
    return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
                                              RISCVISD::OR_VL);
  case ISD::XOR:
    return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
                                              RISCVISD::XOR_VL);
  case ISD::SDIV:
    return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
  case ISD::SREM:
    return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
  case ISD::UDIV:
    return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
  case ISD::UREM:
    return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    if (Op.getSimpleValueType().isFixedLengthVector())
      return lowerFixedLengthVectorShiftToRVV(Op, DAG);
    // This can be called for an i32 shift amount that needs to be promoted.
    assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    return SDValue();
  case ISD::SADDSAT:
    return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
  case ISD::UADDSAT:
    return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
  case ISD::SSUBSAT:
    return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
  case ISD::USUBSAT:
    return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
  case ISD::FADD:
    return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
  case ISD::FSUB:
    return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
  case ISD::FMUL:
    return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
  case ISD::FDIV:
    return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
  case ISD::FNEG:
    return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
  case ISD::FABS:
    return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
  case ISD::FSQRT:
    return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
  case ISD::FMA:
    return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
  case ISD::SMIN:
    return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
  case ISD::SMAX:
    return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
  case ISD::UMIN:
    return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
  case ISD::UMAX:
    return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
  case ISD::FMINNUM:
    return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
  case ISD::FMAXNUM:
    return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
  case ISD::ABS:
    return lowerABS(Op, DAG);
  case ISD::CTLZ_ZERO_UNDEF:
  case ISD::CTTZ_ZERO_UNDEF:
    return lowerCTLZ_CTTZ_ZERO_UNDEF(Op, DAG);
  case ISD::VSELECT:
    return lowerFixedLengthVectorSelectToRVV(Op, DAG);
  case ISD::FCOPYSIGN:
    return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
  case ISD::MGATHER:
  case ISD::VP_GATHER:
    return lowerMaskedGather(Op, DAG);
  case ISD::MSCATTER:
  case ISD::VP_SCATTER:
    return lowerMaskedScatter(Op, DAG);
  case ISD::FLT_ROUNDS_:
    return lowerGET_ROUNDING(Op, DAG);
  case ISD::SET_ROUNDING:
    return lowerSET_ROUNDING(Op, DAG);
  case ISD::VP_SELECT:
    return lowerVPOp(Op, DAG, RISCVISD::VSELECT_VL);
  case ISD::VP_MERGE:
    return lowerVPOp(Op, DAG, RISCVISD::VP_MERGE_VL);
  case ISD::VP_ADD:
    return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
  case ISD::VP_SUB:
    return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
  case ISD::VP_MUL:
    return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
  case ISD::VP_SDIV:
    return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
  case ISD::VP_UDIV:
    return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
  case ISD::VP_SREM:
    return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
  case ISD::VP_UREM:
    return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
  case ISD::VP_AND:
    return lowerLogicVPOp(Op, DAG, RISCVISD::VMAND_VL, RISCVISD::AND_VL);
  case ISD::VP_OR:
    return lowerLogicVPOp(Op, DAG, RISCVISD::VMOR_VL, RISCVISD::OR_VL);
  case ISD::VP_XOR:
    return lowerLogicVPOp(Op, DAG, RISCVISD::VMXOR_VL, RISCVISD::XOR_VL);
  case ISD::VP_ASHR:
    return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
  case ISD::VP_LSHR:
    return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
  case ISD::VP_SHL:
    return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
  case ISD::VP_FADD:
    return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
  case ISD::VP_FSUB:
    return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
  case ISD::VP_FMUL:
    return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
  case ISD::VP_FDIV:
    return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
  case ISD::VP_FNEG:
    return lowerVPOp(Op, DAG, RISCVISD::FNEG_VL);
  case ISD::VP_FMA:
    return lowerVPOp(Op, DAG, RISCVISD::FMA_VL);
  case ISD::VP_SIGN_EXTEND:
  case ISD::VP_ZERO_EXTEND:
    if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
      return lowerVPExtMaskOp(Op, DAG);
    return lowerVPOp(Op, DAG,
                     Op.getOpcode() == ISD::VP_SIGN_EXTEND
                         ? RISCVISD::VSEXT_VL
                         : RISCVISD::VZEXT_VL);
  case ISD::VP_TRUNCATE:
    return lowerVectorTruncLike(Op, DAG);
  case ISD::VP_FP_EXTEND:
  case ISD::VP_FP_ROUND:
    return lowerVectorFPExtendOrRoundLike(Op, DAG);
  case ISD::VP_FPTOSI:
    return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_SINT_VL);
  case ISD::VP_FPTOUI:
    return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_UINT_VL);
  case ISD::VP_SITOFP:
    return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL);
  case ISD::VP_UITOFP:
    return lowerVPFPIntConvOp(Op, DAG, RISCVISD::UINT_TO_FP_VL);
  case ISD::VP_SETCC:
    if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
      return lowerVPSetCCMaskOp(Op, DAG);
    return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
  }
}

static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
}

static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
                                   Flags);
}

static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}

static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
}

template <class NodeTy>
SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                     bool IsLocal) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());

  if (isPositionIndependent()) {
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    if (IsLocal)
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);

    // Use PC-relative addressing to access the GOT for this symbol, then load
    // the address from the GOT. This generates the pattern (PseudoLA sym),
    // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
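    // For example (illustrative), the expanded RV64 assembly looks like:
    //   auipc a0, %got_pcrel_hi(sym)
    //   ld    a0, %pcrel_lo(.Lpcrel_hi0)(a0)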
    SDValue Load =
        SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
    MachineFunction &MF = DAG.getMachineFunction();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
    DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});
    return Load;
  }

  switch (getTargetMachine().getCodeModel()) {
  default:
    report_fatal_error("Unsupported code model for lowering");
  case CodeModel::Small: {
    // Generate a sequence for accessing addresses within the first 2 GiB of
    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
    return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
  }
  case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2 GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
  }
  }
}

template SDValue RISCVTargetLowering::getAddr<GlobalAddressSDNode>(
    GlobalAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
template SDValue RISCVTargetLowering::getAddr<BlockAddressSDNode>(
    BlockAddressSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
template SDValue RISCVTargetLowering::getAddr<ConstantPoolSDNode>(
    ConstantPoolSDNode *N, SelectionDAG &DAG, bool IsLocal) const;
template SDValue RISCVTargetLowering::getAddr<JumpTableSDNode>(
    JumpTableSDNode *N, SelectionDAG &DAG, bool IsLocal) const;

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  const GlobalValue *GV = N->getGlobal();
  bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  SDValue Addr = getAddr(N, DAG, IsLocal);

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
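  // For example (illustrative): accesses to @g+4 and @g+8 can then share a
  // single materialisation of @g's address, with the offsets emitted as
  // separate ADDs that CSE and later peepholes are free to rearrange.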
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
                                            SelectionDAG &DAG) const {
  JumpTableSDNode *N = cast<JumpTableSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                              SelectionDAG &DAG,
                                              bool UseGOT) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  const GlobalValue *GV = N->getGlobal();
  MVT XLenVT = Subtarget.getXLenVT();

  if (UseGOT) {
    // Use PC-relative addressing to access the GOT for this TLS symbol, then
    // load the address from the GOT and add the thread pointer. This generates
    // the pattern (PseudoLA_TLS_IE sym), which expands to
    // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
    SDValue Load =
        SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
    MachineFunction &MF = DAG.getMachineFunction();
    MachineMemOperand *MemOp = MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF),
        MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant,
        LLT(Ty.getSimpleVT()), Align(Ty.getFixedSizeInBits() / 8));
    DAG.setNodeMemRefs(cast<MachineSDNode>(Load.getNode()), {MemOp});

    // Add the thread pointer.
    SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
    return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
  }

  // Generate a sequence for accessing the address relative to the thread
  // pointer, with the appropriate adjustment for the thread pointer offset.
  // This generates the pattern
  // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
  SDValue AddrHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
  SDValue AddrAdd =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
  SDValue AddrLo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);

  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
  SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
  SDValue MNAdd = SDValue(
      DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
      0);
  return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
}

SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                               SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
  const GlobalValue *GV = N->getGlobal();

  // Use a PC-relative addressing mode to access the global dynamic GOT
  // address. This generates the pattern (PseudoLA_TLS_GD sym), which expands
  // to (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
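  // For example (illustrative), the expanded assembly looks like:
  //   auipc a0, %tls_gd_pcrel_hi(sym)
  //   addi  a0, a0, %pcrel_lo(.Lpcrel_hi0)
  // followed by the call to __tls_get_addr set up below.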
  SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
  SDValue Load =
      SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);

  // Prepare argument list to generate call.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Load;
  Entry.Ty = CallTy;
  Args.push_back(Entry);

  // Set up call to __tls_get_addr.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, CallTy,
                    DAG.getExternalSymbol("__tls_get_addr", Ty),
                    std::move(Args));

  return LowerCallTo(CLI).first;
}

SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());

  if (DAG.getMachineFunction().getFunction().getCallingConv() ==
      CallingConv::GHC)
    report_fatal_error("In GHC calling convention TLS is not supported");

  SDValue Addr;
  switch (Model) {
  case TLSModel::LocalExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
    break;
  case TLSModel::InitialExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
    break;
  case TLSModel::LocalDynamic:
  case TLSModel::GeneralDynamic:
    Addr = getDynamicTLSAddr(N, DAG);
    break;
  }

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));
  return Addr;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  MVT XLenVT = Subtarget.getXLenVT();

  // Lower vector SELECTs to VSELECTs by splatting the condition.
  if (VT.isVector()) {
    MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
    SDValue CondSplat = VT.isScalableVector()
                            ? DAG.getSplatVector(SplatCondVT, DL, CondV)
                            : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
    return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
  }

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    // Special case for a select of 2 constants that have a difference of 1.
    // Normally this is done by DAGCombine, but if the select is introduced by
    // type legalization or op legalization, we miss it. Restricting to SETLT
    // case for now because that is what signed saturating add/sub need.
    // FIXME: We don't need the condition to be SETLT or even a SETCC, but we
    // would probably want to swap the true/false values if the condition is
    // SETGE/SETLE to avoid an XORI.
    if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
        CCVal == ISD::SETLT) {
      const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
      const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
      if (TrueVal - 1 == FalseVal)
        return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
      if (TrueVal + 1 == FalseVal)
        return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
    }

    translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);

    SDValue TargetCC = DAG.getCondCode(CCVal);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getCondCode(ISD::SETNE);

  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
}

SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(1);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  if (CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();

    translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);

    SDValue TargetCC = DAG.getCondCode(CCVal);
    return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(),
                       Op.getOperand(0), LHS, RHS, TargetCC, Op.getOperand(2));
  }

  return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
                     CondV, DAG.getConstant(0, DL, XLenVT),
                     DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  Register FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
  Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}

SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 ^ Shamt))
  // else:
  //   Lo = 0
  //   Hi = Lo << (Shamt-XLEN)

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);

  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
  SDValue ShiftRightLo =
      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op,
                                                  SelectionDAG &DAG,
                                                  bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // SRA expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
  //     Hi = Hi >>s Shamt
  //   else:
  //     Lo = Hi >>s (Shamt-XLEN);
  //     Hi = Hi >>s (XLEN-1)
  //
  // SRL expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (ShAmt ^ XLEN-1))
  //     Hi = Hi >>u Shamt
  //   else:
  //     Lo = Hi >>u (Shamt-XLEN);
  //     Hi = 0;

  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::XOR, DL, VT, Shamt, XLenMinus1);

  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
  SDValue ShiftLeftHi =
      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
  SDValue HiFalse =
      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

// Lower splats of i1 types to SETCC. For each mask vector type, we have a
// legal equivalently-sized i8 type, so we can use that as a go-between.
SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue SplatVal = Op.getOperand(0);
  // All-zeros or all-ones splats are handled specially.
  if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
    SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
    return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
  }
  if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
    SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
    return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
  }
  MVT XLenVT = Subtarget.getXLenVT();
  assert(SplatVal.getValueType() == XLenVT &&
         "Unexpected type for i1 splat value");
  MVT InterVT = VT.changeVectorElementType(MVT::i8);
  SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
                         DAG.getConstant(1, DL, XLenVT));
  SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
  SDValue Zero = DAG.getConstant(0, DL, InterVT);
  return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
}

// Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
// illegal (currently only vXi64 RV32).
// FIXME: We could also catch non-constant sign-extended i32 values and lower
// them to VMV_V_X_VL.
SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VecVT = Op.getSimpleValueType();
  assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
         "Unexpected SPLAT_VECTOR_PARTS lowering");

  assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);

  if (VecVT.isFixedLengthVector()) {
    MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
    SDLoc DL(Op);
    SDValue Mask, VL;
    std::tie(Mask, VL) =
        getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

    SDValue Res =
        splatPartsI64WithVL(DL, ContainerVT, SDValue(), Lo, Hi, VL, DAG);
    return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
  }

  if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
    int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
    int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If Hi constant is all the same sign bit as Lo, lower this as a custom
    // node in order to try and match RVV vector/scalar instructions.
    if ((LoC >> 31) == HiC)
      return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
                         Lo, DAG.getRegister(RISCV::X0, MVT::i32));
  }

  // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
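  // For example (illustrative): splatting a sign-extended i32 value %x reaches
  // this point as parts (Lo = %x, Hi = sra %x, 31), which a single vmv.v.x of
  // %x can reproduce for every i64 element.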
  if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
      isa<ConstantSDNode>(Hi.getOperand(1)) &&
      Hi.getConstantOperandVal(1) == 31)
    return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT),
                       Lo, DAG.getRegister(RISCV::X0, MVT::i32));

  // Fall back to use a stack store and stride x0 vector load. Use X0 as VL.
  return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT,
                     DAG.getUNDEF(VecVT), Lo, Hi,
                     DAG.getRegister(RISCV::X0, MVT::i32));
}

// Custom-lower extensions from mask vectors by using a vselect either with 1
// for zero/any-extension or -1 for sign-extension:
//   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
// Note that any-extension is lowered identically to zero-extension.
SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                                                int64_t ExtTrueVal) const {
  SDLoc DL(Op);
  MVT VecVT = Op.getSimpleValueType();
  SDValue Src = Op.getOperand(0);
  // Only custom-lower extensions from mask types
  assert(Src.getValueType().isVector() &&
         Src.getValueType().getVectorElementType() == MVT::i1);

  if (VecVT.isScalableVector()) {
    SDValue SplatZero = DAG.getConstant(0, DL, VecVT);
    SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, VecVT);
    return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
  }

  MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
  MVT I1ContainerVT =
      MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());

  SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

  MVT XLenVT = Subtarget.getXLenVT();
  SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
  SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);

  SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
                          DAG.getUNDEF(ContainerVT), SplatZero, VL);
  SplatTrueVal = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
                             DAG.getUNDEF(ContainerVT), SplatTrueVal, VL);
  SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
                               SplatTrueVal, SplatZero, VL);

  return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
    SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
  MVT ExtVT = Op.getSimpleValueType();
  // Only custom-lower extensions from fixed-length vector types.
  if (!ExtVT.isFixedLengthVector())
    return Op;
  MVT VT = Op.getOperand(0).getSimpleValueType();
  // Grab the canonical container type for the extended type. Infer the smaller
  // type from that to ensure the same number of vector elements, as we know
  // the LMUL will be sufficient to hold the smaller type.
  MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
  // Get the extended container type manually to ensure the same number of
  // vector elements between source and dest.
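  // For example (illustrative, assuming a 128-bit default container): for a
  // v4i32 zext of a v4i8 source, ContainerExtVT would be nxv2i32, so
  // ContainerVT is derived as nxv2i8 to keep the element counts equal.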
  MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
                                     ContainerExtVT.getVectorElementCount());

  SDValue Op1 =
      convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);

  return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
}

// Custom-lower truncations from vectors to mask vectors by using a mask and a
// setcc operation:
//   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
SDValue RISCVTargetLowering::lowerVectorMaskTruncLike(SDValue Op,
                                                      SelectionDAG &DAG) const {
  bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
  SDLoc DL(Op);
  EVT MaskVT = Op.getValueType();
  // Only expect to custom-lower truncations to mask types
  assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
         "Unexpected type for vector mask lowering");
  SDValue Src = Op.getOperand(0);
  MVT VecVT = Src.getSimpleValueType();
  SDValue Mask, VL;
  if (IsVPTrunc) {
    Mask = Op.getOperand(1);
    VL = Op.getOperand(2);
  }
  // If this is a fixed vector, we need to convert it to a scalable vector.
  MVT ContainerVT = VecVT;

  if (VecVT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(VecVT);
    Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
    if (IsVPTrunc) {
      MVT MaskContainerVT =
          getContainerForFixedLengthVector(Mask.getSimpleValueType());
      Mask = convertToScalableVector(MaskContainerVT, Mask, DAG, Subtarget);
    }
  }

  if (!IsVPTrunc) {
    std::tie(Mask, VL) =
        getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
  }

  SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
  SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());

  SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
                         DAG.getUNDEF(ContainerVT), SplatOne, VL);
  SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
                          DAG.getUNDEF(ContainerVT), SplatZero, VL);

  MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
  SDValue Trunc =
      DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
  Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc,
                      SplatZero, DAG.getCondCode(ISD::SETNE), Mask, VL);
  if (MaskVT.isFixedLengthVector())
    Trunc = convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
  return Trunc;
}

SDValue RISCVTargetLowering::lowerVectorTruncLike(SDValue Op,
                                                  SelectionDAG &DAG) const {
  bool IsVPTrunc = Op.getOpcode() == ISD::VP_TRUNCATE;
  SDLoc DL(Op);

  MVT VT = Op.getSimpleValueType();
  // Only custom-lower vector truncates
  assert(VT.isVector() && "Unexpected type for vector truncate lowering");

  // Truncates to mask types are handled differently
  if (VT.getVectorElementType() == MVT::i1)
    return lowerVectorMaskTruncLike(Op, DAG);

  // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
  // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
  // truncate by one power of two at a time.
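  // For example (illustrative): truncating v4i64 to v4i8 emits three
  // TRUNCATE_VECTOR_VL steps, i64 -> i32 -> i16 -> i8, each of which can
  // select to a single vnsrl.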
4211 MVT DstEltVT = VT.getVectorElementType(); 4212 4213 SDValue Src = Op.getOperand(0); 4214 MVT SrcVT = Src.getSimpleValueType(); 4215 MVT SrcEltVT = SrcVT.getVectorElementType(); 4216 4217 assert(DstEltVT.bitsLT(SrcEltVT) && isPowerOf2_64(DstEltVT.getSizeInBits()) && 4218 isPowerOf2_64(SrcEltVT.getSizeInBits()) && 4219 "Unexpected vector truncate lowering"); 4220 4221 MVT ContainerVT = SrcVT; 4222 SDValue Mask, VL; 4223 if (IsVPTrunc) { 4224 Mask = Op.getOperand(1); 4225 VL = Op.getOperand(2); 4226 } 4227 if (SrcVT.isFixedLengthVector()) { 4228 ContainerVT = getContainerForFixedLengthVector(SrcVT); 4229 Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget); 4230 if (IsVPTrunc) { 4231 MVT MaskVT = getMaskTypeFor(ContainerVT); 4232 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 4233 } 4234 } 4235 4236 SDValue Result = Src; 4237 if (!IsVPTrunc) { 4238 std::tie(Mask, VL) = 4239 getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget); 4240 } 4241 4242 LLVMContext &Context = *DAG.getContext(); 4243 const ElementCount Count = ContainerVT.getVectorElementCount(); 4244 do { 4245 SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2); 4246 EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count); 4247 Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result, 4248 Mask, VL); 4249 } while (SrcEltVT != DstEltVT); 4250 4251 if (SrcVT.isFixedLengthVector()) 4252 Result = convertFromScalableVector(VT, Result, DAG, Subtarget); 4253 4254 return Result; 4255 } 4256 4257 SDValue 4258 RISCVTargetLowering::lowerVectorFPExtendOrRoundLike(SDValue Op, 4259 SelectionDAG &DAG) const { 4260 bool IsVP = 4261 Op.getOpcode() == ISD::VP_FP_ROUND || Op.getOpcode() == ISD::VP_FP_EXTEND; 4262 bool IsExtend = 4263 Op.getOpcode() == ISD::VP_FP_EXTEND || Op.getOpcode() == ISD::FP_EXTEND; 4264 // RVV can only do truncate fp to types half the size as the source. We 4265 // custom-lower f64->f16 rounds via RVV's round-to-odd float 4266 // conversion instruction. 4267 SDLoc DL(Op); 4268 MVT VT = Op.getSimpleValueType(); 4269 4270 assert(VT.isVector() && "Unexpected type for vector truncate lowering"); 4271 4272 SDValue Src = Op.getOperand(0); 4273 MVT SrcVT = Src.getSimpleValueType(); 4274 4275 bool IsDirectExtend = IsExtend && (VT.getVectorElementType() != MVT::f64 || 4276 SrcVT.getVectorElementType() != MVT::f16); 4277 bool IsDirectTrunc = !IsExtend && (VT.getVectorElementType() != MVT::f16 || 4278 SrcVT.getVectorElementType() != MVT::f64); 4279 4280 bool IsDirectConv = IsDirectExtend || IsDirectTrunc; 4281 4282 // For FP_ROUND/FP_EXTEND of scalable vectors, leave it to the pattern. 4283 if (!VT.isFixedLengthVector() && !IsVP && IsDirectConv) 4284 return Op; 4285 4286 // Prepare any fixed-length vector operands. 4287 MVT ContainerVT = VT; 4288 SDValue Mask, VL; 4289 if (IsVP) { 4290 Mask = Op.getOperand(1); 4291 VL = Op.getOperand(2); 4292 } 4293 if (VT.isFixedLengthVector()) { 4294 MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT); 4295 ContainerVT = 4296 SrcContainerVT.changeVectorElementType(VT.getVectorElementType()); 4297 Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget); 4298 if (IsVP) { 4299 MVT MaskVT = getMaskTypeFor(ContainerVT); 4300 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 4301 } 4302 } 4303 4304 if (!IsVP) 4305 std::tie(Mask, VL) = 4306 getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget); 4307 4308 unsigned ConvOpc = IsExtend ? 
RISCVISD::FP_EXTEND_VL : RISCVISD::FP_ROUND_VL; 4309 4310 if (IsDirectConv) { 4311 Src = DAG.getNode(ConvOpc, DL, ContainerVT, Src, Mask, VL); 4312 if (VT.isFixedLengthVector()) 4313 Src = convertFromScalableVector(VT, Src, DAG, Subtarget); 4314 return Src; 4315 } 4316 4317 unsigned InterConvOpc = 4318 IsExtend ? RISCVISD::FP_EXTEND_VL : RISCVISD::VFNCVT_ROD_VL; 4319 4320 MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32); 4321 SDValue IntermediateConv = 4322 DAG.getNode(InterConvOpc, DL, InterVT, Src, Mask, VL); 4323 SDValue Result = 4324 DAG.getNode(ConvOpc, DL, ContainerVT, IntermediateConv, Mask, VL); 4325 if (VT.isFixedLengthVector()) 4326 return convertFromScalableVector(VT, Result, DAG, Subtarget); 4327 return Result; 4328 } 4329 4330 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the 4331 // first position of a vector, and that vector is slid up to the insert index. 4332 // By limiting the active vector length to index+1 and merging with the 4333 // original vector (with an undisturbed tail policy for elements >= VL), we 4334 // achieve the desired result of leaving all elements untouched except the one 4335 // at VL-1, which is replaced with the desired value. 4336 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op, 4337 SelectionDAG &DAG) const { 4338 SDLoc DL(Op); 4339 MVT VecVT = Op.getSimpleValueType(); 4340 SDValue Vec = Op.getOperand(0); 4341 SDValue Val = Op.getOperand(1); 4342 SDValue Idx = Op.getOperand(2); 4343 4344 if (VecVT.getVectorElementType() == MVT::i1) { 4345 // FIXME: For now we just promote to an i8 vector and insert into that, 4346 // but this is probably not optimal. 4347 MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount()); 4348 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec); 4349 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx); 4350 return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec); 4351 } 4352 4353 MVT ContainerVT = VecVT; 4354 // If the operand is a fixed-length vector, convert to a scalable one. 4355 if (VecVT.isFixedLengthVector()) { 4356 ContainerVT = getContainerForFixedLengthVector(VecVT); 4357 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 4358 } 4359 4360 MVT XLenVT = Subtarget.getXLenVT(); 4361 4362 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 4363 bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64; 4364 // Even i64-element vectors on RV32 can be lowered without scalar 4365 // legalization if the most-significant 32 bits of the value are not affected 4366 // by the sign-extension of the lower 32 bits. 4367 // TODO: We could also catch sign extensions of a 32-bit value. 4368 if (!IsLegalInsert && isa<ConstantSDNode>(Val)) { 4369 const auto *CVal = cast<ConstantSDNode>(Val); 4370 if (isInt<32>(CVal->getSExtValue())) { 4371 IsLegalInsert = true; 4372 Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32); 4373 } 4374 } 4375 4376 SDValue Mask, VL; 4377 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 4378 4379 SDValue ValInVec; 4380 4381 if (IsLegalInsert) { 4382 unsigned Opc = 4383 VecVT.isFloatingPoint() ? 
RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
4384     if (isNullConstant(Idx)) {
4385       Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
4386       if (!VecVT.isFixedLengthVector())
4387         return Vec;
4388       return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
4389     }
4390     ValInVec =
4391         DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
4392   } else {
4393     // On RV32, i64-element vectors must be specially handled to place the
4394     // value at element 0, by using two vslide1up instructions in sequence on
4395     // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
4396     // this.
4397     SDValue One = DAG.getConstant(1, DL, XLenVT);
4398     SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
4399     SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
4400     MVT I32ContainerVT =
4401         MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
4402     SDValue I32Mask =
4403         getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
4404     // Limit the active VL to two.
4405     SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
4406     // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
4407     // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
4408     ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT,
4409                            DAG.getUNDEF(I32ContainerVT), Zero, InsertI64VL);
4410     // First slide in the hi value, then slide the lo value in underneath it.
4411     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4412                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValHi,
4413                            I32Mask, InsertI64VL);
4414     ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT,
4415                            DAG.getUNDEF(I32ContainerVT), ValInVec, ValLo,
4416                            I32Mask, InsertI64VL);
4417     // Bitcast back to the right container type.
4418     ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
4419   }
4420
4421   // Now that the value is in a vector, slide it into position.
4422   SDValue InsertVL =
4423       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
4424   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4425                                 ValInVec, Idx, Mask, InsertVL);
4426   if (!VecVT.isFixedLengthVector())
4427     return Slideup;
4428   return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4429 }
4430
4431 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
4432 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
4433 // types this is done using VMV_X_S to allow us to glean information about the
4434 // sign bits of the result.
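// For example (an illustrative sketch, not necessarily the exact output),
// extracting element 2 of a v4i32 value in v8 resolves to something like:
//   vsetivli zero, 1, e32, m1, ta, mu
//   vslidedown.vi v8, v8, 2
//   vmv.x.s a0, v8
// Because VMV_X_S sign-extends the element to XLEN, later combines can use
// ComputeNumSignBits on the result to remove redundant sign extensions.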
4435 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op, 4436 SelectionDAG &DAG) const { 4437 SDLoc DL(Op); 4438 SDValue Idx = Op.getOperand(1); 4439 SDValue Vec = Op.getOperand(0); 4440 EVT EltVT = Op.getValueType(); 4441 MVT VecVT = Vec.getSimpleValueType(); 4442 MVT XLenVT = Subtarget.getXLenVT(); 4443 4444 if (VecVT.getVectorElementType() == MVT::i1) { 4445 if (VecVT.isFixedLengthVector()) { 4446 unsigned NumElts = VecVT.getVectorNumElements(); 4447 if (NumElts >= 8) { 4448 MVT WideEltVT; 4449 unsigned WidenVecLen; 4450 SDValue ExtractElementIdx; 4451 SDValue ExtractBitIdx; 4452 unsigned MaxEEW = Subtarget.getELEN(); 4453 MVT LargestEltVT = MVT::getIntegerVT( 4454 std::min(MaxEEW, unsigned(XLenVT.getSizeInBits()))); 4455 if (NumElts <= LargestEltVT.getSizeInBits()) { 4456 assert(isPowerOf2_32(NumElts) && 4457 "the number of elements should be power of 2"); 4458 WideEltVT = MVT::getIntegerVT(NumElts); 4459 WidenVecLen = 1; 4460 ExtractElementIdx = DAG.getConstant(0, DL, XLenVT); 4461 ExtractBitIdx = Idx; 4462 } else { 4463 WideEltVT = LargestEltVT; 4464 WidenVecLen = NumElts / WideEltVT.getSizeInBits(); 4465 // extract element index = index / element width 4466 ExtractElementIdx = DAG.getNode( 4467 ISD::SRL, DL, XLenVT, Idx, 4468 DAG.getConstant(Log2_64(WideEltVT.getSizeInBits()), DL, XLenVT)); 4469 // mask bit index = index % element width 4470 ExtractBitIdx = DAG.getNode( 4471 ISD::AND, DL, XLenVT, Idx, 4472 DAG.getConstant(WideEltVT.getSizeInBits() - 1, DL, XLenVT)); 4473 } 4474 MVT WideVT = MVT::getVectorVT(WideEltVT, WidenVecLen); 4475 Vec = DAG.getNode(ISD::BITCAST, DL, WideVT, Vec); 4476 SDValue ExtractElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, XLenVT, 4477 Vec, ExtractElementIdx); 4478 // Extract the bit from GPR. 4479 SDValue ShiftRight = 4480 DAG.getNode(ISD::SRL, DL, XLenVT, ExtractElt, ExtractBitIdx); 4481 return DAG.getNode(ISD::AND, DL, XLenVT, ShiftRight, 4482 DAG.getConstant(1, DL, XLenVT)); 4483 } 4484 } 4485 // Otherwise, promote to an i8 vector and extract from that. 4486 MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount()); 4487 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec); 4488 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx); 4489 } 4490 4491 // If this is a fixed vector, we need to convert it to a scalable vector. 4492 MVT ContainerVT = VecVT; 4493 if (VecVT.isFixedLengthVector()) { 4494 ContainerVT = getContainerForFixedLengthVector(VecVT); 4495 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 4496 } 4497 4498 // If the index is 0, the vector is already in the right position. 4499 if (!isNullConstant(Idx)) { 4500 // Use a VL of 1 to avoid processing more elements than we need. 4501 SDValue VL = DAG.getConstant(1, DL, XLenVT); 4502 SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG); 4503 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 4504 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); 4505 } 4506 4507 if (!EltVT.isInteger()) { 4508 // Floating-point extracts are handled in TableGen. 4509 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, 4510 DAG.getConstant(0, DL, XLenVT)); 4511 } 4512 4513 SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 4514 return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0); 4515 } 4516 4517 // Some RVV intrinsics may claim that they want an integer operand to be 4518 // promoted or expanded. 
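// For example (illustrative): on RV64 a vadd.vx intrinsic taking an i8
// scalar reaches here with an operand narrower than XLenVT and is promoted
// below via ANY_EXTEND (or SIGN_EXTEND for constants) to i64. The harder
// case is an i64 scalar on RV32, which is wider than XLenVT and must be
// split or splatted as handled further down.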
4519 static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
4520                                            const RISCVSubtarget &Subtarget) {
4521   assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
4522           Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
4523          "Unexpected opcode");
4524
4525   if (!Subtarget.hasVInstructions())
4526     return SDValue();
4527
4528   bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
4529   unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
4530   SDLoc DL(Op);
4531
4532   const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
4533       RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
4534   if (!II || !II->hasScalarOperand())
4535     return SDValue();
4536
4537   unsigned SplatOp = II->ScalarOperand + 1 + HasChain;
4538   assert(SplatOp < Op.getNumOperands());
4539
4540   SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
4541   SDValue &ScalarOp = Operands[SplatOp];
4542   MVT OpVT = ScalarOp.getSimpleValueType();
4543   MVT XLenVT = Subtarget.getXLenVT();
4544
4545   // If this isn't a scalar, or its type is XLenVT, we're done.
4546   if (!OpVT.isScalarInteger() || OpVT == XLenVT)
4547     return SDValue();
4548
4549   // Simplest case is that the operand needs to be promoted to XLenVT.
4550   if (OpVT.bitsLT(XLenVT)) {
4551     // If the operand is a constant, sign extend to increase our chances
4552     // of being able to use a .vi instruction. ANY_EXTEND would become a
4553     // zero extend and the simm5 check in isel would fail.
4554     // FIXME: Should we ignore the upper bits in isel instead?
4555     unsigned ExtOpc =
4556         isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
4557     ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
4558     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4559   }
4560
4561   // Use the previous operand to get the vXi64 VT. The result might be a mask
4562   // VT for compares. Using the previous operand assumes that the previous
4563   // operand will never have a smaller element size than a scalar operand and
4564   // that a widening operation never uses SEW=64.
4565   // NOTE: If this trips the assert below, we can probably just find the
4566   // element count from any operand or result and use it to construct the VT.
4567   assert(II->ScalarOperand > 0 && "Unexpected splat operand!");
4568   MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
4569
4570   // The more complex case is when the scalar is larger than XLenVT.
4571   assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
4572          VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
4573
4574   // If this is a sign-extended 32-bit value, we can truncate it and rely on the
4575   // instruction to sign-extend since SEW > XLEN.
4576   if (DAG.ComputeNumSignBits(ScalarOp) > 32) {
4577     ScalarOp = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, ScalarOp);
4578     return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4579   }
4580
4581   switch (IntNo) {
4582   case Intrinsic::riscv_vslide1up:
4583   case Intrinsic::riscv_vslide1down:
4584   case Intrinsic::riscv_vslide1up_mask:
4585   case Intrinsic::riscv_vslide1down_mask: {
4586     // We need to special case these when the scalar is larger than XLen.
4587     unsigned NumOps = Op.getNumOperands();
4588     bool IsMasked = NumOps == 7;
4589
4590     // Convert the vector source to the equivalent nxvXi32 vector.
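// E.g. an nxv2i64 source is reinterpreted as nxv4i32 so the i64 scalar can
// be inserted as two i32 halves; the bitcast itself emits no instructions.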
4591     MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
4592     SDValue Vec = DAG.getBitcast(I32VT, Operands[2]);
4593
4594     SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4595                                    DAG.getConstant(0, DL, XLenVT));
4596     SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, ScalarOp,
4597                                    DAG.getConstant(1, DL, XLenVT));
4598
4599     // Double the VL since we halved SEW.
4600     SDValue AVL = getVLOperand(Op);
4601     SDValue I32VL;
4602
4603     // Optimize for a constant AVL.
4604     if (isa<ConstantSDNode>(AVL)) {
4605       unsigned EltSize = VT.getScalarSizeInBits();
4606       unsigned MinSize = VT.getSizeInBits().getKnownMinValue();
4607
4608       unsigned VectorBitsMax = Subtarget.getRealMaxVLen();
4609       unsigned MaxVLMAX =
4610           RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
4611
4612       unsigned VectorBitsMin = Subtarget.getRealMinVLen();
4613       unsigned MinVLMAX =
4614           RISCVTargetLowering::computeVLMAX(VectorBitsMin, EltSize, MinSize);
4615
4616       uint64_t AVLInt = cast<ConstantSDNode>(AVL)->getZExtValue();
4617       if (AVLInt <= MinVLMAX) {
4618         I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
4619       } else if (AVLInt >= 2 * MaxVLMAX) {
4620         // Just set vl to VLMAX in this situation.
4621         RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
4622         SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4623         unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
4624         SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4625         SDValue SETVLMAX = DAG.getTargetConstant(
4626             Intrinsic::riscv_vsetvlimax_opt, DL, MVT::i32);
4627         I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
4628                             LMUL);
4629       } else {
4630         // For AVL between (MinVLMAX, 2 * MaxVLMAX), the actual working vl
4631         // depends on the hardware implementation, so let the non-constant
4632         // path below handle it.
4633       }
4634     }
4635     if (!I32VL) {
4636       RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
4637       SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
4638       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
4639       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
4640       SDValue SETVL =
4641           DAG.getTargetConstant(Intrinsic::riscv_vsetvli_opt, DL, MVT::i32);
4642       // Use the vsetvli instruction to get the actually-used vector length,
4643       // which depends on the hardware implementation.
4644       SDValue VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVL, AVL,
4645                                SEW, LMUL);
4646       I32VL =
4647           DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
4648     }
4649
4650     SDValue I32Mask = getAllOnesMask(I32VT, I32VL, DL, DAG);
4651
4652     // Shift the two scalar parts in using SEW=32 slide1up/slide1down
4653     // instructions.
4654     SDValue Passthru;
4655     if (IsMasked)
4656       Passthru = DAG.getUNDEF(I32VT);
4657     else
4658       Passthru = DAG.getBitcast(I32VT, Operands[1]);
4659
4660     if (IntNo == Intrinsic::riscv_vslide1up ||
4661         IntNo == Intrinsic::riscv_vslide1up_mask) {
4662       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4663                         ScalarHi, I32Mask, I32VL);
4664       Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Passthru, Vec,
4665                         ScalarLo, I32Mask, I32VL);
4666     } else {
4667       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4668                         ScalarLo, I32Mask, I32VL);
4669       Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Passthru, Vec,
4670                         ScalarHi, I32Mask, I32VL);
4671     }
4672
4673     // Convert back to nxvXi64.
4674     Vec = DAG.getBitcast(VT, Vec);
4675
4676     if (!IsMasked)
4677       return Vec;
4678     // Apply the mask after the operation.
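// Conceptually this computes Result[i] = Mask[i] ? Vec[i] : MaskedOff[i]
// over the first AVL elements. A sketch of a masked vslide1down lowering
// (assuming the lo/hi halves of the scalar live in a0/a1):
//   vslide1down.vx v9, v8, a0
//   vslide1down.vx v9, v9, a1
//   vmerge.vvm v8, v10, v9, v0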
4679     SDValue Mask = Operands[NumOps - 3];
4680     SDValue MaskedOff = Operands[1];
4681     // Assume the Policy operand is the last operand.
4682     uint64_t Policy =
4683         cast<ConstantSDNode>(Operands[NumOps - 1])->getZExtValue();
4684     // We don't need to select maskedoff if it's undef.
4685     if (MaskedOff.isUndef())
4686       return Vec;
4687     // TAMU (tail agnostic, mask undisturbed).
4688     if (Policy == RISCVII::TAIL_AGNOSTIC)
4689       return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff,
4690                          AVL);
4691     // TUMA or TUMU: Currently we always emit the tumu policy regardless of
4692     // tuma. It's fine because vmerge does not care about the mask policy.
4693     return DAG.getNode(RISCVISD::VP_MERGE_VL, DL, VT, Mask, Vec, MaskedOff,
4694                        AVL);
4695   }
4696   }
4697
4698   // We need to convert the scalar to a splat vector.
4699   SDValue VL = getVLOperand(Op);
4700   assert(VL.getValueType() == XLenVT);
4701   ScalarOp = splatSplitI64WithVL(DL, VT, SDValue(), ScalarOp, VL, DAG);
4702   return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
4703 }
4704
4705 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
4706                                                      SelectionDAG &DAG) const {
4707   unsigned IntNo = Op.getConstantOperandVal(0);
4708   SDLoc DL(Op);
4709   MVT XLenVT = Subtarget.getXLenVT();
4710
4711   switch (IntNo) {
4712   default:
4713     break; // Don't custom lower most intrinsics.
4714   case Intrinsic::thread_pointer: {
4715     EVT PtrVT = getPointerTy(DAG.getDataLayout());
4716     return DAG.getRegister(RISCV::X4, PtrVT);
4717   }
4718   case Intrinsic::riscv_orc_b:
4719   case Intrinsic::riscv_brev8: {
4720     // Lower to the GORCI encoding for orc.b or the GREVI encoding for brev8.
4721     unsigned Opc =
4722         IntNo == Intrinsic::riscv_brev8 ? RISCVISD::GREV : RISCVISD::GORC;
4723     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4724                        DAG.getConstant(7, DL, XLenVT));
4725   }
4726   case Intrinsic::riscv_grev:
4727   case Intrinsic::riscv_gorc: {
4728     unsigned Opc =
4729         IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
4730     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4731   }
4732   case Intrinsic::riscv_zip:
4733   case Intrinsic::riscv_unzip: {
4734     // Lower to the SHFLI encoding for zip or the UNSHFLI encoding for unzip.
4735     // For i32 the immediate is 15. For i64 the immediate is 31.
4736     unsigned Opc =
4737         IntNo == Intrinsic::riscv_zip ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4738     unsigned BitWidth = Op.getValueSizeInBits();
4739     assert(isPowerOf2_32(BitWidth) && BitWidth >= 2 && "Unexpected bit width");
4740     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1),
4741                        DAG.getConstant((BitWidth / 2) - 1, DL, XLenVT));
4742   }
4743   case Intrinsic::riscv_shfl:
4744   case Intrinsic::riscv_unshfl: {
4745     unsigned Opc =
4746         IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
4747     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4748   }
4749   case Intrinsic::riscv_bcompress:
4750   case Intrinsic::riscv_bdecompress: {
4751     unsigned Opc = IntNo == Intrinsic::riscv_bcompress ?
RISCVISD::BCOMPRESS
4752                                                        : RISCVISD::BDECOMPRESS;
4753     return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
4754   }
4755   case Intrinsic::riscv_bfp:
4756     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
4757                        Op.getOperand(2));
4758   case Intrinsic::riscv_fsl:
4759     return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
4760                        Op.getOperand(2), Op.getOperand(3));
4761   case Intrinsic::riscv_fsr:
4762     return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
4763                        Op.getOperand(2), Op.getOperand(3));
4764   case Intrinsic::riscv_vmv_x_s:
4765     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
4766     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
4767                        Op.getOperand(1));
4768   case Intrinsic::riscv_vmv_v_x:
4769     return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
4770                             Op.getOperand(3), Op.getSimpleValueType(), DL, DAG,
4771                             Subtarget);
4772   case Intrinsic::riscv_vfmv_v_f:
4773     return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
4774                        Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
4775   case Intrinsic::riscv_vmv_s_x: {
4776     SDValue Scalar = Op.getOperand(2);
4777
4778     if (Scalar.getValueType().bitsLE(XLenVT)) {
4779       Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
4780       return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
4781                          Op.getOperand(1), Scalar, Op.getOperand(3));
4782     }
4783
4784     assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
4785
4786     // This is an i64 value that lives in two scalar registers. We have to
4787     // insert this in a convoluted way. First we build a vXi64 splat containing
4788     // the two values that we assemble using some bit math. Next we'll use
4789     // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
4790     // to merge element 0 from our splat into the source vector.
4791     // FIXME: This is probably not the best way to do this, but it is
4792     // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
4793     // point.
4794     //   sw lo, (a0)
4795     //   sw hi, 4(a0)
4796     //   vlse vX, (a0)
4797     //
4798     //   vid.v vVid
4799     //   vmseq.vx mMask, vVid, 0
4800     //   vmerge.vvm vDest, vSrc, vVal, mMask
4801     MVT VT = Op.getSimpleValueType();
4802     SDValue Vec = Op.getOperand(1);
4803     SDValue VL = getVLOperand(Op);
4804
4805     SDValue SplattedVal = splatSplitI64WithVL(DL, VT, SDValue(), Scalar, VL, DAG);
4806     if (Op.getOperand(1).isUndef())
4807       return SplattedVal;
4808     SDValue SplattedIdx =
4809         DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
4810                     DAG.getConstant(0, DL, MVT::i32), VL);
4811
4812     MVT MaskVT = getMaskTypeFor(VT);
4813     SDValue Mask = getAllOnesMask(VT, VL, DL, DAG);
4814     SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4815     SDValue SelectCond =
4816         DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
4817                     DAG.getCondCode(ISD::SETEQ), Mask, VL);
4818     return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
4819                        Vec, VL);
4820   }
4821   }
4822
4823   return lowerVectorIntrinsicScalars(Op, DAG, Subtarget);
4824 }
4825
4826 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
4827                                                     SelectionDAG &DAG) const {
4828   unsigned IntNo = Op.getConstantOperandVal(1);
4829   switch (IntNo) {
4830   default:
4831     break;
4832   case Intrinsic::riscv_masked_strided_load: {
4833     SDLoc DL(Op);
4834     MVT XLenVT = Subtarget.getXLenVT();
4835
4836     // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4837     // the selection of the masked intrinsics doesn't do this for us.
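// E.g. (a sketch) a masked strided load of v4i32 whose mask is a constant
// all-ones splat becomes a plain strided load:
//   vsetivli zero, 4, e32, m1, ta, mu
//   vlse32.v v8, (a0), a1
// rather than the masked "vlse32.v v8, (a0), a1, v0.t" form.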
4838 SDValue Mask = Op.getOperand(5); 4839 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); 4840 4841 MVT VT = Op->getSimpleValueType(0); 4842 MVT ContainerVT = getContainerForFixedLengthVector(VT); 4843 4844 SDValue PassThru = Op.getOperand(2); 4845 if (!IsUnmasked) { 4846 MVT MaskVT = getMaskTypeFor(ContainerVT); 4847 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 4848 PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget); 4849 } 4850 4851 SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); 4852 4853 SDValue IntID = DAG.getTargetConstant( 4854 IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL, 4855 XLenVT); 4856 4857 auto *Load = cast<MemIntrinsicSDNode>(Op); 4858 SmallVector<SDValue, 8> Ops{Load->getChain(), IntID}; 4859 if (IsUnmasked) 4860 Ops.push_back(DAG.getUNDEF(ContainerVT)); 4861 else 4862 Ops.push_back(PassThru); 4863 Ops.push_back(Op.getOperand(3)); // Ptr 4864 Ops.push_back(Op.getOperand(4)); // Stride 4865 if (!IsUnmasked) 4866 Ops.push_back(Mask); 4867 Ops.push_back(VL); 4868 if (!IsUnmasked) { 4869 SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT); 4870 Ops.push_back(Policy); 4871 } 4872 4873 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 4874 SDValue Result = 4875 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, 4876 Load->getMemoryVT(), Load->getMemOperand()); 4877 SDValue Chain = Result.getValue(1); 4878 Result = convertFromScalableVector(VT, Result, DAG, Subtarget); 4879 return DAG.getMergeValues({Result, Chain}, DL); 4880 } 4881 case Intrinsic::riscv_seg2_load: 4882 case Intrinsic::riscv_seg3_load: 4883 case Intrinsic::riscv_seg4_load: 4884 case Intrinsic::riscv_seg5_load: 4885 case Intrinsic::riscv_seg6_load: 4886 case Intrinsic::riscv_seg7_load: 4887 case Intrinsic::riscv_seg8_load: { 4888 SDLoc DL(Op); 4889 static const Intrinsic::ID VlsegInts[7] = { 4890 Intrinsic::riscv_vlseg2, Intrinsic::riscv_vlseg3, 4891 Intrinsic::riscv_vlseg4, Intrinsic::riscv_vlseg5, 4892 Intrinsic::riscv_vlseg6, Intrinsic::riscv_vlseg7, 4893 Intrinsic::riscv_vlseg8}; 4894 unsigned NF = Op->getNumValues() - 1; 4895 assert(NF >= 2 && NF <= 8 && "Unexpected seg number"); 4896 MVT XLenVT = Subtarget.getXLenVT(); 4897 MVT VT = Op->getSimpleValueType(0); 4898 MVT ContainerVT = getContainerForFixedLengthVector(VT); 4899 4900 SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); 4901 SDValue IntID = DAG.getTargetConstant(VlsegInts[NF - 2], DL, XLenVT); 4902 auto *Load = cast<MemIntrinsicSDNode>(Op); 4903 SmallVector<EVT, 9> ContainerVTs(NF, ContainerVT); 4904 ContainerVTs.push_back(MVT::Other); 4905 SDVTList VTs = DAG.getVTList(ContainerVTs); 4906 SmallVector<SDValue, 12> Ops = {Load->getChain(), IntID}; 4907 Ops.insert(Ops.end(), NF, DAG.getUNDEF(ContainerVT)); 4908 Ops.push_back(Op.getOperand(2)); 4909 Ops.push_back(VL); 4910 SDValue Result = 4911 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, 4912 Load->getMemoryVT(), Load->getMemOperand()); 4913 SmallVector<SDValue, 9> Results; 4914 for (unsigned int RetIdx = 0; RetIdx < NF; RetIdx++) 4915 Results.push_back(convertFromScalableVector(VT, Result.getValue(RetIdx), 4916 DAG, Subtarget)); 4917 Results.push_back(Result.getValue(NF)); 4918 return DAG.getMergeValues(Results, DL); 4919 } 4920 } 4921 4922 return lowerVectorIntrinsicScalars(Op, DAG, Subtarget); 4923 } 4924 4925 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op, 4926 SelectionDAG &DAG) const { 4927 unsigned IntNo 
= Op.getConstantOperandVal(1); 4928 switch (IntNo) { 4929 default: 4930 break; 4931 case Intrinsic::riscv_masked_strided_store: { 4932 SDLoc DL(Op); 4933 MVT XLenVT = Subtarget.getXLenVT(); 4934 4935 // If the mask is known to be all ones, optimize to an unmasked intrinsic; 4936 // the selection of the masked intrinsics doesn't do this for us. 4937 SDValue Mask = Op.getOperand(5); 4938 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); 4939 4940 SDValue Val = Op.getOperand(2); 4941 MVT VT = Val.getSimpleValueType(); 4942 MVT ContainerVT = getContainerForFixedLengthVector(VT); 4943 4944 Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget); 4945 if (!IsUnmasked) { 4946 MVT MaskVT = getMaskTypeFor(ContainerVT); 4947 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 4948 } 4949 4950 SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT); 4951 4952 SDValue IntID = DAG.getTargetConstant( 4953 IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL, 4954 XLenVT); 4955 4956 auto *Store = cast<MemIntrinsicSDNode>(Op); 4957 SmallVector<SDValue, 8> Ops{Store->getChain(), IntID}; 4958 Ops.push_back(Val); 4959 Ops.push_back(Op.getOperand(3)); // Ptr 4960 Ops.push_back(Op.getOperand(4)); // Stride 4961 if (!IsUnmasked) 4962 Ops.push_back(Mask); 4963 Ops.push_back(VL); 4964 4965 return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(), 4966 Ops, Store->getMemoryVT(), 4967 Store->getMemOperand()); 4968 } 4969 } 4970 4971 return SDValue(); 4972 } 4973 4974 static MVT getLMUL1VT(MVT VT) { 4975 assert(VT.getVectorElementType().getSizeInBits() <= 64 && 4976 "Unexpected vector MVT"); 4977 return MVT::getScalableVectorVT( 4978 VT.getVectorElementType(), 4979 RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits()); 4980 } 4981 4982 static unsigned getRVVReductionOp(unsigned ISDOpcode) { 4983 switch (ISDOpcode) { 4984 default: 4985 llvm_unreachable("Unhandled reduction"); 4986 case ISD::VECREDUCE_ADD: 4987 return RISCVISD::VECREDUCE_ADD_VL; 4988 case ISD::VECREDUCE_UMAX: 4989 return RISCVISD::VECREDUCE_UMAX_VL; 4990 case ISD::VECREDUCE_SMAX: 4991 return RISCVISD::VECREDUCE_SMAX_VL; 4992 case ISD::VECREDUCE_UMIN: 4993 return RISCVISD::VECREDUCE_UMIN_VL; 4994 case ISD::VECREDUCE_SMIN: 4995 return RISCVISD::VECREDUCE_SMIN_VL; 4996 case ISD::VECREDUCE_AND: 4997 return RISCVISD::VECREDUCE_AND_VL; 4998 case ISD::VECREDUCE_OR: 4999 return RISCVISD::VECREDUCE_OR_VL; 5000 case ISD::VECREDUCE_XOR: 5001 return RISCVISD::VECREDUCE_XOR_VL; 5002 } 5003 } 5004 5005 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op, 5006 SelectionDAG &DAG, 5007 bool IsVP) const { 5008 SDLoc DL(Op); 5009 SDValue Vec = Op.getOperand(IsVP ? 
1 : 0); 5010 MVT VecVT = Vec.getSimpleValueType(); 5011 assert((Op.getOpcode() == ISD::VECREDUCE_AND || 5012 Op.getOpcode() == ISD::VECREDUCE_OR || 5013 Op.getOpcode() == ISD::VECREDUCE_XOR || 5014 Op.getOpcode() == ISD::VP_REDUCE_AND || 5015 Op.getOpcode() == ISD::VP_REDUCE_OR || 5016 Op.getOpcode() == ISD::VP_REDUCE_XOR) && 5017 "Unexpected reduction lowering"); 5018 5019 MVT XLenVT = Subtarget.getXLenVT(); 5020 assert(Op.getValueType() == XLenVT && 5021 "Expected reduction output to be legalized to XLenVT"); 5022 5023 MVT ContainerVT = VecVT; 5024 if (VecVT.isFixedLengthVector()) { 5025 ContainerVT = getContainerForFixedLengthVector(VecVT); 5026 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 5027 } 5028 5029 SDValue Mask, VL; 5030 if (IsVP) { 5031 Mask = Op.getOperand(2); 5032 VL = Op.getOperand(3); 5033 } else { 5034 std::tie(Mask, VL) = 5035 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 5036 } 5037 5038 unsigned BaseOpc; 5039 ISD::CondCode CC; 5040 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 5041 5042 switch (Op.getOpcode()) { 5043 default: 5044 llvm_unreachable("Unhandled reduction"); 5045 case ISD::VECREDUCE_AND: 5046 case ISD::VP_REDUCE_AND: { 5047 // vcpop ~x == 0 5048 SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL); 5049 Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL); 5050 Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL); 5051 CC = ISD::SETEQ; 5052 BaseOpc = ISD::AND; 5053 break; 5054 } 5055 case ISD::VECREDUCE_OR: 5056 case ISD::VP_REDUCE_OR: 5057 // vcpop x != 0 5058 Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL); 5059 CC = ISD::SETNE; 5060 BaseOpc = ISD::OR; 5061 break; 5062 case ISD::VECREDUCE_XOR: 5063 case ISD::VP_REDUCE_XOR: { 5064 // ((vcpop x) & 1) != 0 5065 SDValue One = DAG.getConstant(1, DL, XLenVT); 5066 Vec = DAG.getNode(RISCVISD::VCPOP_VL, DL, XLenVT, Vec, Mask, VL); 5067 Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One); 5068 CC = ISD::SETNE; 5069 BaseOpc = ISD::XOR; 5070 break; 5071 } 5072 } 5073 5074 SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC); 5075 5076 if (!IsVP) 5077 return SetCC; 5078 5079 // Now include the start value in the operation. 5080 // Note that we must return the start value when no elements are operated 5081 // upon. The vcpop instructions we've emitted in each case above will return 5082 // 0 for an inactive vector, and so we've already received the neutral value: 5083 // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we 5084 // can simply include the start value. 5085 return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0)); 5086 } 5087 5088 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op, 5089 SelectionDAG &DAG) const { 5090 SDLoc DL(Op); 5091 SDValue Vec = Op.getOperand(0); 5092 EVT VecEVT = Vec.getValueType(); 5093 5094 unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode()); 5095 5096 // Due to ordering in legalize types we may have a vector type that needs to 5097 // be split. Do that manually so we can get down to a legal type. 5098 while (getTypeAction(*DAG.getContext(), VecEVT) == 5099 TargetLowering::TypeSplitVector) { 5100 SDValue Lo, Hi; 5101 std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL); 5102 VecEVT = Lo.getValueType(); 5103 Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi); 5104 } 5105 5106 // TODO: The type may need to be widened rather than split. Or widened before 5107 // it can be split. 
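// E.g. a vecreduce.add over v64i32 on a target where that type must be
// split is handled by the loop above: the two v32i32 halves are ADDed
// together, and the (now legal) half-width vector is reduced below. This is
// an illustrative case; the actual split point depends on the legal types.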
5108 if (!isTypeLegal(VecEVT)) 5109 return SDValue(); 5110 5111 MVT VecVT = VecEVT.getSimpleVT(); 5112 MVT VecEltVT = VecVT.getVectorElementType(); 5113 unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode()); 5114 5115 MVT ContainerVT = VecVT; 5116 if (VecVT.isFixedLengthVector()) { 5117 ContainerVT = getContainerForFixedLengthVector(VecVT); 5118 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 5119 } 5120 5121 MVT M1VT = getLMUL1VT(ContainerVT); 5122 MVT XLenVT = Subtarget.getXLenVT(); 5123 5124 SDValue Mask, VL; 5125 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 5126 5127 SDValue NeutralElem = 5128 DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags()); 5129 SDValue IdentitySplat = 5130 lowerScalarSplat(SDValue(), NeutralElem, DAG.getConstant(1, DL, XLenVT), 5131 M1VT, DL, DAG, Subtarget); 5132 SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec, 5133 IdentitySplat, Mask, VL); 5134 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, 5135 DAG.getConstant(0, DL, XLenVT)); 5136 return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType()); 5137 } 5138 5139 // Given a reduction op, this function returns the matching reduction opcode, 5140 // the vector SDValue and the scalar SDValue required to lower this to a 5141 // RISCVISD node. 5142 static std::tuple<unsigned, SDValue, SDValue> 5143 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) { 5144 SDLoc DL(Op); 5145 auto Flags = Op->getFlags(); 5146 unsigned Opcode = Op.getOpcode(); 5147 unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode); 5148 switch (Opcode) { 5149 default: 5150 llvm_unreachable("Unhandled reduction"); 5151 case ISD::VECREDUCE_FADD: { 5152 // Use positive zero if we can. It is cheaper to materialize. 5153 SDValue Zero = 5154 DAG.getConstantFP(Flags.hasNoSignedZeros() ? 
0.0 : -0.0, DL, EltVT); 5155 return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0), Zero); 5156 } 5157 case ISD::VECREDUCE_SEQ_FADD: 5158 return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1), 5159 Op.getOperand(0)); 5160 case ISD::VECREDUCE_FMIN: 5161 return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0), 5162 DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags)); 5163 case ISD::VECREDUCE_FMAX: 5164 return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0), 5165 DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags)); 5166 } 5167 } 5168 5169 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op, 5170 SelectionDAG &DAG) const { 5171 SDLoc DL(Op); 5172 MVT VecEltVT = Op.getSimpleValueType(); 5173 5174 unsigned RVVOpcode; 5175 SDValue VectorVal, ScalarVal; 5176 std::tie(RVVOpcode, VectorVal, ScalarVal) = 5177 getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT); 5178 MVT VecVT = VectorVal.getSimpleValueType(); 5179 5180 MVT ContainerVT = VecVT; 5181 if (VecVT.isFixedLengthVector()) { 5182 ContainerVT = getContainerForFixedLengthVector(VecVT); 5183 VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget); 5184 } 5185 5186 MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType()); 5187 MVT XLenVT = Subtarget.getXLenVT(); 5188 5189 SDValue Mask, VL; 5190 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget); 5191 5192 SDValue ScalarSplat = 5193 lowerScalarSplat(SDValue(), ScalarVal, DAG.getConstant(1, DL, XLenVT), 5194 M1VT, DL, DAG, Subtarget); 5195 SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), 5196 VectorVal, ScalarSplat, Mask, VL); 5197 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction, 5198 DAG.getConstant(0, DL, XLenVT)); 5199 } 5200 5201 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) { 5202 switch (ISDOpcode) { 5203 default: 5204 llvm_unreachable("Unhandled reduction"); 5205 case ISD::VP_REDUCE_ADD: 5206 return RISCVISD::VECREDUCE_ADD_VL; 5207 case ISD::VP_REDUCE_UMAX: 5208 return RISCVISD::VECREDUCE_UMAX_VL; 5209 case ISD::VP_REDUCE_SMAX: 5210 return RISCVISD::VECREDUCE_SMAX_VL; 5211 case ISD::VP_REDUCE_UMIN: 5212 return RISCVISD::VECREDUCE_UMIN_VL; 5213 case ISD::VP_REDUCE_SMIN: 5214 return RISCVISD::VECREDUCE_SMIN_VL; 5215 case ISD::VP_REDUCE_AND: 5216 return RISCVISD::VECREDUCE_AND_VL; 5217 case ISD::VP_REDUCE_OR: 5218 return RISCVISD::VECREDUCE_OR_VL; 5219 case ISD::VP_REDUCE_XOR: 5220 return RISCVISD::VECREDUCE_XOR_VL; 5221 case ISD::VP_REDUCE_FADD: 5222 return RISCVISD::VECREDUCE_FADD_VL; 5223 case ISD::VP_REDUCE_SEQ_FADD: 5224 return RISCVISD::VECREDUCE_SEQ_FADD_VL; 5225 case ISD::VP_REDUCE_FMAX: 5226 return RISCVISD::VECREDUCE_FMAX_VL; 5227 case ISD::VP_REDUCE_FMIN: 5228 return RISCVISD::VECREDUCE_FMIN_VL; 5229 } 5230 } 5231 5232 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op, 5233 SelectionDAG &DAG) const { 5234 SDLoc DL(Op); 5235 SDValue Vec = Op.getOperand(1); 5236 EVT VecEVT = Vec.getValueType(); 5237 5238 // TODO: The type may need to be widened rather than split. Or widened before 5239 // it can be split. 
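// E.g. vp.reduce.add(start, v, mask, evl) over nxv2i32 lowers to roughly
// (a sketch): splat 'start' into element 0 of an LMUL=1 register, then
//   vsetvli zero, aEVL, e32, m1, ta, mu
//   vredsum.vs v9, v8, v9, v0.t
//   vmv.x.s a0, v9
// so the start value is returned unchanged when no lanes are active.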
5240 if (!isTypeLegal(VecEVT)) 5241 return SDValue(); 5242 5243 MVT VecVT = VecEVT.getSimpleVT(); 5244 MVT VecEltVT = VecVT.getVectorElementType(); 5245 unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode()); 5246 5247 MVT ContainerVT = VecVT; 5248 if (VecVT.isFixedLengthVector()) { 5249 ContainerVT = getContainerForFixedLengthVector(VecVT); 5250 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 5251 } 5252 5253 SDValue VL = Op.getOperand(3); 5254 SDValue Mask = Op.getOperand(2); 5255 5256 MVT M1VT = getLMUL1VT(ContainerVT); 5257 MVT XLenVT = Subtarget.getXLenVT(); 5258 MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT; 5259 5260 SDValue StartSplat = lowerScalarSplat(SDValue(), Op.getOperand(0), 5261 DAG.getConstant(1, DL, XLenVT), M1VT, 5262 DL, DAG, Subtarget); 5263 SDValue Reduction = 5264 DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL); 5265 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction, 5266 DAG.getConstant(0, DL, XLenVT)); 5267 if (!VecVT.isInteger()) 5268 return Elt0; 5269 return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType()); 5270 } 5271 5272 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op, 5273 SelectionDAG &DAG) const { 5274 SDValue Vec = Op.getOperand(0); 5275 SDValue SubVec = Op.getOperand(1); 5276 MVT VecVT = Vec.getSimpleValueType(); 5277 MVT SubVecVT = SubVec.getSimpleValueType(); 5278 5279 SDLoc DL(Op); 5280 MVT XLenVT = Subtarget.getXLenVT(); 5281 unsigned OrigIdx = Op.getConstantOperandVal(2); 5282 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo(); 5283 5284 // We don't have the ability to slide mask vectors up indexed by their i1 5285 // elements; the smallest we can do is i8. Often we are able to bitcast to 5286 // equivalent i8 vectors. Note that when inserting a fixed-length vector 5287 // into a scalable one, we might not necessarily have enough scalable 5288 // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid. 5289 if (SubVecVT.getVectorElementType() == MVT::i1 && 5290 (OrigIdx != 0 || !Vec.isUndef())) { 5291 if (VecVT.getVectorMinNumElements() >= 8 && 5292 SubVecVT.getVectorMinNumElements() >= 8) { 5293 assert(OrigIdx % 8 == 0 && "Invalid index"); 5294 assert(VecVT.getVectorMinNumElements() % 8 == 0 && 5295 SubVecVT.getVectorMinNumElements() % 8 == 0 && 5296 "Unexpected mask vector lowering"); 5297 OrigIdx /= 8; 5298 SubVecVT = 5299 MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8, 5300 SubVecVT.isScalableVector()); 5301 VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8, 5302 VecVT.isScalableVector()); 5303 Vec = DAG.getBitcast(VecVT, Vec); 5304 SubVec = DAG.getBitcast(SubVecVT, SubVec); 5305 } else { 5306 // We can't slide this mask vector up indexed by its i1 elements. 5307 // This poses a problem when we wish to insert a scalable vector which 5308 // can't be re-expressed as a larger type. Just choose the slow path and 5309 // extend to a larger type, then truncate back down. 
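// E.g. (a sketch) inserting v4i1 into nxv1i1: both operands are
// zero-extended to i8 element types (v4i8 and nxv1i8), the INSERT_SUBVECTOR
// is redone on the wider types, and a setcc against zero recovers the mask.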
5310       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5311       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5312       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5313       SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
5314       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
5315                         Op.getOperand(2));
5316       SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
5317       return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
5318     }
5319   }
5320
5321   // If the subvector is a fixed-length type, we cannot use subregister
5322   // manipulation to simplify the codegen; we don't know which register of a
5323   // LMUL group contains the specific subvector as we only know the minimum
5324   // register size. Therefore we must slide the vector group up the full
5325   // amount.
5326   if (SubVecVT.isFixedLengthVector()) {
5327     if (OrigIdx == 0 && Vec.isUndef() && !VecVT.isFixedLengthVector())
5328       return Op;
5329     MVT ContainerVT = VecVT;
5330     if (VecVT.isFixedLengthVector()) {
5331       ContainerVT = getContainerForFixedLengthVector(VecVT);
5332       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5333     }
5334     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
5335                          DAG.getUNDEF(ContainerVT), SubVec,
5336                          DAG.getConstant(0, DL, XLenVT));
5337     if (OrigIdx == 0 && Vec.isUndef() && VecVT.isFixedLengthVector()) {
5338       SubVec = convertFromScalableVector(VecVT, SubVec, DAG, Subtarget);
5339       return DAG.getBitcast(Op.getValueType(), SubVec);
5340     }
5341     SDValue Mask =
5342         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5343     // Set the vector length to only the number of elements we care about. Note
5344     // that for slideup this includes the offset.
5345     SDValue VL =
5346         DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
5347     SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5348     SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
5349                                   SubVec, SlideupAmt, Mask, VL);
5350     if (VecVT.isFixedLengthVector())
5351       Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
5352     return DAG.getBitcast(Op.getValueType(), Slideup);
5353   }
5354
5355   unsigned SubRegIdx, RemIdx;
5356   std::tie(SubRegIdx, RemIdx) =
5357       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5358           VecVT, SubVecVT, OrigIdx, TRI);
5359
5360   RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
5361   bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
5362                          SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
5363                          SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
5364
5365   // 1. If the Idx has been completely eliminated and this subvector's size is
5366   // a vector register or a multiple thereof, or the surrounding elements are
5367   // undef, then this is a subvector insert which naturally aligns to a vector
5368   // register. These can easily be handled using subregister manipulation.
5369   // 2. If the subvector is smaller than a vector register, then the insertion
5370   // must preserve the undisturbed elements of the register. We do this by
5371   // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
5372   // (which resolves to a subregister copy), performing a VSLIDEUP to place the
5373   // subvector within the vector register, and an INSERT_SUBVECTOR of that
5374   // LMUL=1 type back into the larger vector (resolving to another subregister
5375   // operation). See below for how our VSLIDEUP works. We go via an LMUL=1 type
5376   // to avoid allocating a large register group to hold our subvector.
5377   if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
5378     return Op;
5379
5380   // VSLIDEUP works by leaving elements 0<=i<OFFSET undisturbed, elements
5381   // OFFSET<=i<VL set to the "subvector" and VL<=i<VLMAX set to the tail policy
5382   // (in our case undisturbed). This means we can set up a subvector insertion
5383   // where OFFSET is the insertion offset, and the VL is the OFFSET plus the
5384   // size of the subvector.
5385   MVT InterSubVT = VecVT;
5386   SDValue AlignedExtract = Vec;
5387   unsigned AlignedIdx = OrigIdx - RemIdx;
5388   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5389     InterSubVT = getLMUL1VT(VecVT);
5390     // Extract a subvector equal to the nearest full vector register type. This
5391     // should resolve to an EXTRACT_SUBREG instruction.
5392     AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5393                                  DAG.getConstant(AlignedIdx, DL, XLenVT));
5394   }
5395
5396   SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5397   // For scalable vectors this must be further multiplied by vscale.
5398   SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
5399
5400   SDValue Mask, VL;
5401   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5402
5403   // Construct the vector length corresponding to RemIdx + length(SubVecVT).
5404   VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
5405   VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
5406   VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
5407
5408   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
5409                        DAG.getUNDEF(InterSubVT), SubVec,
5410                        DAG.getConstant(0, DL, XLenVT));
5411
5412   SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
5413                                 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
5414
5415   // If required, insert this subvector back into the correct vector register.
5416   // This should resolve to an INSERT_SUBREG instruction.
5417   if (VecVT.bitsGT(InterSubVT))
5418     Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
5419                           DAG.getConstant(AlignedIdx, DL, XLenVT));
5420
5421   // We might have bitcast from a mask type: cast back to the original type if
5422   // required.
5423   return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
5424 }
5425
5426 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
5427                                                     SelectionDAG &DAG) const {
5428   SDValue Vec = Op.getOperand(0);
5429   MVT SubVecVT = Op.getSimpleValueType();
5430   MVT VecVT = Vec.getSimpleValueType();
5431
5432   SDLoc DL(Op);
5433   MVT XLenVT = Subtarget.getXLenVT();
5434   unsigned OrigIdx = Op.getConstantOperandVal(1);
5435   const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
5436
5437   // We don't have the ability to slide mask vectors down indexed by their i1
5438   // elements; the smallest we can do is i8. Often we are able to bitcast to
5439   // equivalent i8 vectors. Note that when extracting a fixed-length vector
5440   // from a scalable one, we might not necessarily have enough scalable
5441   // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
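// E.g. (a sketch) extracting v16i1 at index 16 from nxv8i1 can instead be
// performed as extracting v2i8 at index 2 from nxv1i8 after a bitcast,
// since the index and both element counts are divisible by 8.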
5442   if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
5443     if (VecVT.getVectorMinNumElements() >= 8 &&
5444         SubVecVT.getVectorMinNumElements() >= 8) {
5445       assert(OrigIdx % 8 == 0 && "Invalid index");
5446       assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
5447              SubVecVT.getVectorMinNumElements() % 8 == 0 &&
5448              "Unexpected mask vector lowering");
5449       OrigIdx /= 8;
5450       SubVecVT =
5451           MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
5452                            SubVecVT.isScalableVector());
5453       VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
5454                                VecVT.isScalableVector());
5455       Vec = DAG.getBitcast(VecVT, Vec);
5456     } else {
5457       // We can't slide this mask vector down indexed by its i1 elements.
5458       // This poses a problem when we wish to extract a scalable vector which
5459       // can't be re-expressed as a larger type. Just choose the slow path and
5460       // extend to a larger type, then truncate back down.
5461       // TODO: We could probably improve this when extracting certain fixed
5462       // vectors from fixed vectors, where we can extract as i8 and shift the
5463       // correct element right to reach the desired subvector.
5464       MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
5465       MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
5466       Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
5467       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
5468                         Op.getOperand(1));
5469       SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
5470       return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
5471     }
5472   }
5473
5474   // If the subvector is a fixed-length type, we cannot use subregister
5475   // manipulation to simplify the codegen; we don't know which register of a
5476   // LMUL group contains the specific subvector as we only know the minimum
5477   // register size. Therefore we must slide the vector group down the full
5478   // amount.
5479   if (SubVecVT.isFixedLengthVector()) {
5480     // With an index of 0 this is a cast-like subvector, which can be performed
5481     // with subregister operations.
5482     if (OrigIdx == 0)
5483       return Op;
5484     MVT ContainerVT = VecVT;
5485     if (VecVT.isFixedLengthVector()) {
5486       ContainerVT = getContainerForFixedLengthVector(VecVT);
5487       Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5488     }
5489     SDValue Mask =
5490         getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
5491     // Set the vector length to only the number of elements we care about. This
5492     // avoids sliding down elements we're going to discard straight away.
5493     SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
5494     SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
5495     SDValue Slidedown =
5496         DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5497                     DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
5498     // Now we can use a cast-like subvector extract to get the result.
5499     Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5500                             DAG.getConstant(0, DL, XLenVT));
5501     return DAG.getBitcast(Op.getValueType(), Slidedown);
5502   }
5503
5504   unsigned SubRegIdx, RemIdx;
5505   std::tie(SubRegIdx, RemIdx) =
5506       RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
5507           VecVT, SubVecVT, OrigIdx, TRI);
5508
5509   // If the Idx has been completely eliminated then this is a subvector extract
5510   // which naturally aligns to a vector register. These can easily be handled
5511   // using subregister manipulation.
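// E.g. (illustrative) extracting nxv4i32 at index 4 from nxv8i32 decomposes
// to an LMUL=2 subregister of the LMUL=4 register group with RemIdx == 0,
// so the extract is just a subregister copy and no vslidedown is needed.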
5512   if (RemIdx == 0)
5513     return Op;
5514
5515   // Else we must shift our vector register directly to extract the subvector.
5516   // Do this using VSLIDEDOWN.
5517
5518   // If the vector type is an LMUL-group type, extract a subvector equal to the
5519   // nearest full vector register type. This should resolve to an EXTRACT_SUBREG
5520   // instruction.
5521   MVT InterSubVT = VecVT;
5522   if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
5523     InterSubVT = getLMUL1VT(VecVT);
5524     Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
5525                       DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
5526   }
5527
5528   // Slide this vector register down by the desired number of elements in order
5529   // to place the desired subvector starting at element 0.
5530   SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
5531   // For scalable vectors this must be further multiplied by vscale.
5532   SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
5533
5534   SDValue Mask, VL;
5535   std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
5536   SDValue Slidedown =
5537       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
5538                   DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
5539
5540   // Now the vector is in the right position, extract our final subvector. This
5541   // should resolve to a COPY.
5542   Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
5543                           DAG.getConstant(0, DL, XLenVT));
5544
5545   // We might have bitcast from a mask type: cast back to the original type if
5546   // required.
5547   return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
5548 }
5549
5550 // Lower step_vector to the vid instruction. Any non-identity step value must
5551 // be accounted for by manual expansion.
5552 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
5553                                               SelectionDAG &DAG) const {
5554   SDLoc DL(Op);
5555   MVT VT = Op.getSimpleValueType();
5556   MVT XLenVT = Subtarget.getXLenVT();
5557   SDValue Mask, VL;
5558   std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
5559   SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
5560   uint64_t StepValImm = Op.getConstantOperandVal(0);
5561   if (StepValImm != 1) {
5562     if (isPowerOf2_64(StepValImm)) {
5563       SDValue StepVal =
5564           DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
5565                       DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
5566       StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
5567     } else {
5568       SDValue StepVal = lowerScalarSplat(
5569           SDValue(), DAG.getConstant(StepValImm, DL, VT.getVectorElementType()),
5570           VL, VT, DL, DAG, Subtarget);
5571       StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
5572     }
5573   }
5574   return StepVec;
5575 }
5576
5577 // Implement vector_reverse using vrgather.vv with indices determined by
5578 // subtracting the id of each element from (VLMAX-1). This will convert
5579 // the indices like so:
5580 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
5581 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
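// E.g. for an nxv4i32 reverse, with a0 holding VLMAX-1 computed from
// vlenb, this emits something like (a sketch):
//   vid.v v12
//   vrsub.vx v12, v12, a0
//   vrgather.vv v8, v10, v12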
5582 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
5583                                                  SelectionDAG &DAG) const {
5584   SDLoc DL(Op);
5585   MVT VecVT = Op.getSimpleValueType();
5586   unsigned EltSize = VecVT.getScalarSizeInBits();
5587   unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
5588
5589   unsigned MaxVLMAX = 0;
5590   unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
5591   if (VectorBitsMax != 0)
5592     MaxVLMAX =
5593         RISCVTargetLowering::computeVLMAX(VectorBitsMax, EltSize, MinSize);
5594
5595   unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
5596   MVT IntVT = VecVT.changeVectorElementTypeToInteger();
5597
5598   // If this is SEW=8 and VLMAX is unknown or more than 256, we need
5599   // to use vrgatherei16.vv.
5600   // TODO: It's also possible to use vrgatherei16.vv for other types to
5601   // decrease register width for the index calculation.
5602   if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
5603     // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
5604     // Reverse each half, then reassemble them in reverse order.
5605     // NOTE: It's also possible that, after splitting, VLMAX no longer
5606     // requires vrgatherei16.vv.
5607     if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
5608       SDValue Lo, Hi;
5609       std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
5610       EVT LoVT, HiVT;
5611       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
5612       Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
5613       Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
5614       // Reassemble the low and high pieces reversed.
5615       // FIXME: This is a CONCAT_VECTORS.
5616       SDValue Res =
5617           DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
5618                       DAG.getIntPtrConstant(0, DL));
5619       return DAG.getNode(
5620           ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
5621           DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
5622     }
5623
5624     // Just promote the int type to i16 which will double the LMUL.
5625     IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
5626     GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
5627   }
5628
5629   MVT XLenVT = Subtarget.getXLenVT();
5630   SDValue Mask, VL;
5631   std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
5632
5633   // Calculate VLMAX-1 for the desired SEW.
5634   unsigned MinElts = VecVT.getVectorMinNumElements();
5635   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5636                               DAG.getConstant(MinElts, DL, XLenVT));
5637   SDValue VLMinus1 =
5638       DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
5639
5640   // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
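// Note: on RV32 with SEW=64 an i64 splat can't come from a single GPR, but
// vmv.v.x sign-extends its 32-bit source to SEW and VLMAX-1 always fits in
// 32 bits here (see the TODO above), so splatting the i32 VLMinus1 with an
// X0 (VLMAX) vector length operand is sufficient.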
5641   bool IsRV32E64 =
5642       !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
5643   SDValue SplatVL;
5644   if (!IsRV32E64)
5645     SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
5646   else
5647     SplatVL = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, DAG.getUNDEF(IntVT),
5648                           VLMinus1, DAG.getRegister(RISCV::X0, XLenVT));
5649
5650   SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
5651   SDValue Indices =
5652       DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
5653
5654   return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
5655 }
5656
5657 SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
5658                                                 SelectionDAG &DAG) const {
5659   SDLoc DL(Op);
5660   SDValue V1 = Op.getOperand(0);
5661   SDValue V2 = Op.getOperand(1);
5662   MVT XLenVT = Subtarget.getXLenVT();
5663   MVT VecVT = Op.getSimpleValueType();
5664
5665   unsigned MinElts = VecVT.getVectorMinNumElements();
5666   SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
5667                               DAG.getConstant(MinElts, DL, XLenVT));
5668
5669   int64_t ImmValue = cast<ConstantSDNode>(Op.getOperand(2))->getSExtValue();
5670   SDValue DownOffset, UpOffset;
5671   if (ImmValue >= 0) {
5672     // The operand is a TargetConstant, so we need to rebuild it as a regular
5673     // constant.
5674     DownOffset = DAG.getConstant(ImmValue, DL, XLenVT);
5675     UpOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DownOffset);
5676   } else {
5677     // The operand is a TargetConstant, so we need to rebuild it as a regular
5678     // constant rather than negating the original operand.
5679     UpOffset = DAG.getConstant(-ImmValue, DL, XLenVT);
5680     DownOffset = DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, UpOffset);
5681   }
5682
5683   SDValue TrueMask = getAllOnesMask(VecVT, VLMax, DL, DAG);
5684
5685   SDValue SlideDown =
5686       DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VecVT, DAG.getUNDEF(VecVT), V1,
5687                   DownOffset, TrueMask, UpOffset);
5688   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VecVT, SlideDown, V2, UpOffset,
5689                      TrueMask,
5690                      DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT));
5691 }
5692
5693 SDValue
5694 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
5695                                                      SelectionDAG &DAG) const {
5696   SDLoc DL(Op);
5697   auto *Load = cast<LoadSDNode>(Op);
5698
5699   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5700                                         Load->getMemoryVT(),
5701                                         *Load->getMemOperand()) &&
5702          "Expecting a correctly-aligned load");
5703
5704   MVT VT = Op.getSimpleValueType();
5705   MVT XLenVT = Subtarget.getXLenVT();
5706   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5707
5708   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5709
5710   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5711   SDValue IntID = DAG.getTargetConstant(
5712       IsMaskOp ?
Intrinsic::riscv_vlm : Intrinsic::riscv_vle, DL, XLenVT);
5713   SmallVector<SDValue, 4> Ops{Load->getChain(), IntID};
5714   if (!IsMaskOp)
5715     Ops.push_back(DAG.getUNDEF(ContainerVT));
5716   Ops.push_back(Load->getBasePtr());
5717   Ops.push_back(VL);
5718   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5719   SDValue NewLoad =
5720       DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
5721                               Load->getMemoryVT(), Load->getMemOperand());
5722
5723   SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
5724   return DAG.getMergeValues({Result, Load->getChain()}, DL);
5725 }
5726
5727 SDValue
5728 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
5729                                                       SelectionDAG &DAG) const {
5730   SDLoc DL(Op);
5731   auto *Store = cast<StoreSDNode>(Op);
5732
5733   assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
5734                                         Store->getMemoryVT(),
5735                                         *Store->getMemOperand()) &&
5736          "Expecting a correctly-aligned store");
5737
5738   SDValue StoreVal = Store->getValue();
5739   MVT VT = StoreVal.getSimpleValueType();
5740   MVT XLenVT = Subtarget.getXLenVT();
5741
5742   // If the size is less than a byte, we need to pad with zeros to make a byte.
5743   if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
5744     VT = MVT::v8i1;
5745     StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
5746                            DAG.getConstant(0, DL, VT), StoreVal,
5747                            DAG.getIntPtrConstant(0, DL));
5748   }
5749
5750   MVT ContainerVT = getContainerForFixedLengthVector(VT);
5751
5752   SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
5753
5754   SDValue NewValue =
5755       convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
5756
5757   bool IsMaskOp = VT.getVectorElementType() == MVT::i1;
5758   SDValue IntID = DAG.getTargetConstant(
5759       IsMaskOp ? Intrinsic::riscv_vsm : Intrinsic::riscv_vse, DL, XLenVT);
5760   return DAG.getMemIntrinsicNode(
5761       ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other),
5762       {Store->getChain(), IntID, NewValue, Store->getBasePtr(), VL},
5763       Store->getMemoryVT(), Store->getMemOperand());
5764 }
5765
5766 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
5767                                              SelectionDAG &DAG) const {
5768   SDLoc DL(Op);
5769   MVT VT = Op.getSimpleValueType();
5770
5771   const auto *MemSD = cast<MemSDNode>(Op);
5772   EVT MemVT = MemSD->getMemoryVT();
5773   MachineMemOperand *MMO = MemSD->getMemOperand();
5774   SDValue Chain = MemSD->getChain();
5775   SDValue BasePtr = MemSD->getBasePtr();
5776
5777   SDValue Mask, PassThru, VL;
5778   if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
5779     Mask = VPLoad->getMask();
5780     PassThru = DAG.getUNDEF(VT);
5781     VL = VPLoad->getVectorLength();
5782   } else {
5783     const auto *MLoad = cast<MaskedLoadSDNode>(Op);
5784     Mask = MLoad->getMask();
5785     PassThru = MLoad->getPassThru();
5786   }
5787
5788   bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5789
5790   MVT XLenVT = Subtarget.getXLenVT();
5791
5792   MVT ContainerVT = VT;
5793   if (VT.isFixedLengthVector()) {
5794     ContainerVT = getContainerForFixedLengthVector(VT);
5795     PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5796     if (!IsUnmasked) {
5797       MVT MaskVT = getMaskTypeFor(ContainerVT);
5798       Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5799     }
5800   }
5801
5802   if (!VL)
5803     VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5804
5805   unsigned IntID =
5806       IsUnmasked ?
Intrinsic::riscv_vle : Intrinsic::riscv_vle_mask; 5807 SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)}; 5808 if (IsUnmasked) 5809 Ops.push_back(DAG.getUNDEF(ContainerVT)); 5810 else 5811 Ops.push_back(PassThru); 5812 Ops.push_back(BasePtr); 5813 if (!IsUnmasked) 5814 Ops.push_back(Mask); 5815 Ops.push_back(VL); 5816 if (!IsUnmasked) 5817 Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT)); 5818 5819 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 5820 5821 SDValue Result = 5822 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO); 5823 Chain = Result.getValue(1); 5824 5825 if (VT.isFixedLengthVector()) 5826 Result = convertFromScalableVector(VT, Result, DAG, Subtarget); 5827 5828 return DAG.getMergeValues({Result, Chain}, DL); 5829 } 5830 5831 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op, 5832 SelectionDAG &DAG) const { 5833 SDLoc DL(Op); 5834 5835 const auto *MemSD = cast<MemSDNode>(Op); 5836 EVT MemVT = MemSD->getMemoryVT(); 5837 MachineMemOperand *MMO = MemSD->getMemOperand(); 5838 SDValue Chain = MemSD->getChain(); 5839 SDValue BasePtr = MemSD->getBasePtr(); 5840 SDValue Val, Mask, VL; 5841 5842 if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) { 5843 Val = VPStore->getValue(); 5844 Mask = VPStore->getMask(); 5845 VL = VPStore->getVectorLength(); 5846 } else { 5847 const auto *MStore = cast<MaskedStoreSDNode>(Op); 5848 Val = MStore->getValue(); 5849 Mask = MStore->getMask(); 5850 } 5851 5852 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); 5853 5854 MVT VT = Val.getSimpleValueType(); 5855 MVT XLenVT = Subtarget.getXLenVT(); 5856 5857 MVT ContainerVT = VT; 5858 if (VT.isFixedLengthVector()) { 5859 ContainerVT = getContainerForFixedLengthVector(VT); 5860 5861 Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget); 5862 if (!IsUnmasked) { 5863 MVT MaskVT = getMaskTypeFor(ContainerVT); 5864 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 5865 } 5866 } 5867 5868 if (!VL) 5869 VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second; 5870 5871 unsigned IntID = 5872 IsUnmasked ? 
Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask; 5873 SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)}; 5874 Ops.push_back(Val); 5875 Ops.push_back(BasePtr); 5876 if (!IsUnmasked) 5877 Ops.push_back(Mask); 5878 Ops.push_back(VL); 5879 5880 return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, 5881 DAG.getVTList(MVT::Other), Ops, MemVT, MMO); 5882 } 5883 5884 SDValue 5885 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op, 5886 SelectionDAG &DAG) const { 5887 MVT InVT = Op.getOperand(0).getSimpleValueType(); 5888 MVT ContainerVT = getContainerForFixedLengthVector(InVT); 5889 5890 MVT VT = Op.getSimpleValueType(); 5891 5892 SDValue Op1 = 5893 convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget); 5894 SDValue Op2 = 5895 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget); 5896 5897 SDLoc DL(Op); 5898 SDValue VL = 5899 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT()); 5900 5901 MVT MaskVT = getMaskTypeFor(ContainerVT); 5902 SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG); 5903 5904 SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2, 5905 Op.getOperand(2), Mask, VL); 5906 5907 return convertFromScalableVector(VT, Cmp, DAG, Subtarget); 5908 } 5909 5910 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV( 5911 SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const { 5912 MVT VT = Op.getSimpleValueType(); 5913 5914 if (VT.getVectorElementType() == MVT::i1) 5915 return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false); 5916 5917 return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true); 5918 } 5919 5920 SDValue 5921 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op, 5922 SelectionDAG &DAG) const { 5923 unsigned Opc; 5924 switch (Op.getOpcode()) { 5925 default: llvm_unreachable("Unexpected opcode!"); 5926 case ISD::SHL: Opc = RISCVISD::SHL_VL; break; 5927 case ISD::SRA: Opc = RISCVISD::SRA_VL; break; 5928 case ISD::SRL: Opc = RISCVISD::SRL_VL; break; 5929 } 5930 5931 return lowerToScalableOp(Op, DAG, Opc); 5932 } 5933 5934 // Lower vector ABS to smax(X, sub(0, X)). 
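// For example (an illustrative sketch, not a test case): for a v4i32 X equal
// to <-3, 4, -5, 6>, NegX below is <3, -4, 5, -6> and smax(X, NegX) gives
// <3, 4, 5, 6>.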
SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue X = Op.getOperand(0);

  assert(VT.isFixedLengthVector() && "Unexpected type");

  MVT ContainerVT = getContainerForFixedLengthVector(VT);
  X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  SDValue SplatZero = DAG.getNode(
      RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
      DAG.getConstant(0, DL, Subtarget.getXLenVT()), VL);
  SDValue NegX =
      DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
  SDValue Max =
      DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);

  return convertFromScalableVector(VT, Max, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
    SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  SDValue Mag = Op.getOperand(0);
  SDValue Sign = Op.getOperand(1);
  assert(Mag.getValueType() == Sign.getValueType() &&
         "Can only handle COPYSIGN with matching types.");

  MVT ContainerVT = getContainerForFixedLengthVector(VT);
  Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
  Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);

  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  SDValue CopySign =
      DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);

  return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
    SDValue Op, SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = getContainerForFixedLengthVector(VT);

  MVT I1ContainerVT =
      MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());

  SDValue CC =
      convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
  SDValue Op1 =
      convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
  SDValue Op2 =
      convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  SDValue Select =
      DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);

  return convertFromScalableVector(VT, Select, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
                                               unsigned NewOpc,
                                               bool HasMask) const {
  MVT VT = Op.getSimpleValueType();
  MVT ContainerVT = getContainerForFixedLengthVector(VT);

  // Create list of operands by converting existing ones to scalable types.
  SmallVector<SDValue, 6> Ops;
  for (const SDValue &V : Op->op_values()) {
    assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");

    // Pass through non-vector operands.
    if (!V.getValueType().isVector()) {
      Ops.push_back(V);
      continue;
    }

    // "cast" fixed length vector to a scalable vector.
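    // The "cast" is an INSERT_SUBVECTOR of the fixed-length value into an
    // undef scalable container at index 0, e.g. a v4i32 operand landing in a
    // container such as nxv2i32 (the exact container depends on VLEN; this
    // pairing is illustrative).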
6024 assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) && 6025 "Only fixed length vectors are supported!"); 6026 Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget)); 6027 } 6028 6029 SDLoc DL(Op); 6030 SDValue Mask, VL; 6031 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); 6032 if (HasMask) 6033 Ops.push_back(Mask); 6034 Ops.push_back(VL); 6035 6036 SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops); 6037 return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget); 6038 } 6039 6040 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node: 6041 // * Operands of each node are assumed to be in the same order. 6042 // * The EVL operand is promoted from i32 to i64 on RV64. 6043 // * Fixed-length vectors are converted to their scalable-vector container 6044 // types. 6045 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG, 6046 unsigned RISCVISDOpc) const { 6047 SDLoc DL(Op); 6048 MVT VT = Op.getSimpleValueType(); 6049 SmallVector<SDValue, 4> Ops; 6050 6051 for (const auto &OpIdx : enumerate(Op->ops())) { 6052 SDValue V = OpIdx.value(); 6053 assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!"); 6054 // Pass through operands which aren't fixed-length vectors. 6055 if (!V.getValueType().isFixedLengthVector()) { 6056 Ops.push_back(V); 6057 continue; 6058 } 6059 // "cast" fixed length vector to a scalable vector. 6060 MVT OpVT = V.getSimpleValueType(); 6061 MVT ContainerVT = getContainerForFixedLengthVector(OpVT); 6062 assert(useRVVForFixedLengthVectorVT(OpVT) && 6063 "Only fixed length vectors are supported!"); 6064 Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget)); 6065 } 6066 6067 if (!VT.isFixedLengthVector()) 6068 return DAG.getNode(RISCVISDOpc, DL, VT, Ops); 6069 6070 MVT ContainerVT = getContainerForFixedLengthVector(VT); 6071 6072 SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops); 6073 6074 return convertFromScalableVector(VT, VPOp, DAG, Subtarget); 6075 } 6076 6077 SDValue RISCVTargetLowering::lowerVPExtMaskOp(SDValue Op, 6078 SelectionDAG &DAG) const { 6079 SDLoc DL(Op); 6080 MVT VT = Op.getSimpleValueType(); 6081 6082 SDValue Src = Op.getOperand(0); 6083 // NOTE: Mask is dropped. 6084 SDValue VL = Op.getOperand(2); 6085 6086 MVT ContainerVT = VT; 6087 if (VT.isFixedLengthVector()) { 6088 ContainerVT = getContainerForFixedLengthVector(VT); 6089 MVT SrcVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount()); 6090 Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget); 6091 } 6092 6093 MVT XLenVT = Subtarget.getXLenVT(); 6094 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 6095 SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, 6096 DAG.getUNDEF(ContainerVT), Zero, VL); 6097 6098 SDValue SplatValue = DAG.getConstant( 6099 Op.getOpcode() == ISD::VP_ZERO_EXTEND ? 
          1 : -1, DL, XLenVT);
  SDValue Splat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
                              DAG.getUNDEF(ContainerVT), SplatValue, VL);

  SDValue Result = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, Src,
                               Splat, ZeroSplat, VL);
  if (!VT.isFixedLengthVector())
    return Result;
  return convertFromScalableVector(VT, Result, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();

  SDValue Op1 = Op.getOperand(0);
  SDValue Op2 = Op.getOperand(1);
  ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  // NOTE: Mask is dropped.
  SDValue VL = Op.getOperand(4);

  MVT ContainerVT = VT;
  if (VT.isFixedLengthVector()) {
    ContainerVT = getContainerForFixedLengthVector(VT);
    Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
    Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
  }

  SDValue Result;
  SDValue AllOneMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);

  switch (Condition) {
  default:
    break;
  // X != Y --> (X^Y)
  case ISD::SETNE:
    Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
    break;
  // X == Y --> ~(X^Y)
  case ISD::SETEQ: {
    SDValue Temp =
        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
    Result =
        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, AllOneMask, VL);
    break;
  }
  // X >s Y --> X == 0 & Y == 1 --> ~X & Y
  // X <u Y --> X == 0 & Y == 1 --> ~X & Y
  case ISD::SETGT:
  case ISD::SETULT: {
    SDValue Temp =
        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
    Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Temp, Op2, VL);
    break;
  }
  // X <s Y --> X == 1 & Y == 0 --> ~Y & X
  // X >u Y --> X == 1 & Y == 0 --> ~Y & X
  case ISD::SETLT:
  case ISD::SETUGT: {
    SDValue Temp =
        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
    Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Op1, Temp, VL);
    break;
  }
  // X >=s Y --> X == 0 | Y == 1 --> ~X | Y
  // X <=u Y --> X == 0 | Y == 1 --> ~X | Y
  case ISD::SETGE:
  case ISD::SETULE: {
    SDValue Temp =
        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
    Result = DAG.getNode(RISCVISD::VMOR_VL, DL, ContainerVT, Temp, Op2, VL);
    break;
  }
  // X <=s Y --> X == 1 | Y == 0 --> ~Y | X
  // X >=u Y --> X == 1 | Y == 0 --> ~Y | X
  case ISD::SETLE:
  case ISD::SETUGE: {
    SDValue Temp =
        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
    Result = DAG.getNode(RISCVISD::VMOR_VL, DL, ContainerVT, Temp, Op1, VL);
    break;
  }
  }

  if (!VT.isFixedLengthVector())
    return Result;
  return convertFromScalableVector(VT, Result, DAG, Subtarget);
}

// Lower Floating-Point/Integer Type-Convert VP SDNodes
SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
                                                unsigned RISCVISDOpc) const {
  SDLoc DL(Op);

  SDValue Src = Op.getOperand(0);
  SDValue Mask = Op.getOperand(1);
  SDValue VL = Op.getOperand(2);

  MVT DstVT = Op.getSimpleValueType();
  MVT SrcVT = Src.getSimpleValueType();
  if (DstVT.isFixedLengthVector()) {
    DstVT =
getContainerForFixedLengthVector(DstVT); 6202 SrcVT = getContainerForFixedLengthVector(SrcVT); 6203 Src = convertToScalableVector(SrcVT, Src, DAG, Subtarget); 6204 MVT MaskVT = getMaskTypeFor(DstVT); 6205 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 6206 } 6207 6208 unsigned RISCVISDExtOpc = (RISCVISDOpc == RISCVISD::SINT_TO_FP_VL || 6209 RISCVISDOpc == RISCVISD::FP_TO_SINT_VL) 6210 ? RISCVISD::VSEXT_VL 6211 : RISCVISD::VZEXT_VL; 6212 6213 unsigned DstEltSize = DstVT.getScalarSizeInBits(); 6214 unsigned SrcEltSize = SrcVT.getScalarSizeInBits(); 6215 6216 SDValue Result; 6217 if (DstEltSize >= SrcEltSize) { // Single-width and widening conversion. 6218 if (SrcVT.isInteger()) { 6219 assert(DstVT.isFloatingPoint() && "Wrong input/output vector types"); 6220 6221 // Do we need to do any pre-widening before converting? 6222 if (SrcEltSize == 1) { 6223 MVT IntVT = DstVT.changeVectorElementTypeToInteger(); 6224 MVT XLenVT = Subtarget.getXLenVT(); 6225 SDValue Zero = DAG.getConstant(0, DL, XLenVT); 6226 SDValue ZeroSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, 6227 DAG.getUNDEF(IntVT), Zero, VL); 6228 SDValue One = DAG.getConstant( 6229 RISCVISDExtOpc == RISCVISD::VZEXT_VL ? 1 : -1, DL, XLenVT); 6230 SDValue OneSplat = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, IntVT, 6231 DAG.getUNDEF(IntVT), One, VL); 6232 Src = DAG.getNode(RISCVISD::VSELECT_VL, DL, IntVT, Src, OneSplat, 6233 ZeroSplat, VL); 6234 } else if (DstEltSize > (2 * SrcEltSize)) { 6235 // Widen before converting. 6236 MVT IntVT = MVT::getVectorVT(MVT::getIntegerVT(DstEltSize / 2), 6237 DstVT.getVectorElementCount()); 6238 Src = DAG.getNode(RISCVISDExtOpc, DL, IntVT, Src, Mask, VL); 6239 } 6240 6241 Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL); 6242 } else { 6243 assert(SrcVT.isFloatingPoint() && DstVT.isInteger() && 6244 "Wrong input/output vector types"); 6245 6246 // Convert f16 to f32 then convert f32 to i64. 6247 if (DstEltSize > (2 * SrcEltSize)) { 6248 assert(SrcVT.getVectorElementType() == MVT::f16 && "Unexpected type!"); 6249 MVT InterimFVT = 6250 MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount()); 6251 Src = 6252 DAG.getNode(RISCVISD::FP_EXTEND_VL, DL, InterimFVT, Src, Mask, VL); 6253 } 6254 6255 Result = DAG.getNode(RISCVISDOpc, DL, DstVT, Src, Mask, VL); 6256 } 6257 } else { // Narrowing + Conversion 6258 if (SrcVT.isInteger()) { 6259 assert(DstVT.isFloatingPoint() && "Wrong input/output vector types"); 6260 // First do a narrowing convert to an FP type half the size, then round 6261 // the FP type to a small FP type if needed. 6262 6263 MVT InterimFVT = DstVT; 6264 if (SrcEltSize > (2 * DstEltSize)) { 6265 assert(SrcEltSize == (4 * DstEltSize) && "Unexpected types!"); 6266 assert(DstVT.getVectorElementType() == MVT::f16 && "Unexpected type!"); 6267 InterimFVT = MVT::getVectorVT(MVT::f32, DstVT.getVectorElementCount()); 6268 } 6269 6270 Result = DAG.getNode(RISCVISDOpc, DL, InterimFVT, Src, Mask, VL); 6271 6272 if (InterimFVT != DstVT) { 6273 Src = Result; 6274 Result = DAG.getNode(RISCVISD::FP_ROUND_VL, DL, DstVT, Src, Mask, VL); 6275 } 6276 } else { 6277 assert(SrcVT.isFloatingPoint() && DstVT.isInteger() && 6278 "Wrong input/output vector types"); 6279 // First do a narrowing conversion to an integer half the size, then 6280 // truncate if needed. 6281 6282 if (DstEltSize == 1) { 6283 // First convert to the same size integer, then convert to mask using 6284 // setcc. 
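        // For instance, a VP_FP_TO_SINT from f32 to i1 would (roughly) become
        // i32 = fp_to_sint(f32 src), then i1 = setcc(i32, 0, setne), as
        // sketched below.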
        assert(SrcEltSize >= 16 && "Unexpected FP type!");
        MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize),
                                          DstVT.getVectorElementCount());
        Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);

        // Compare the integer result to 0. The integer should be 0 or 1/-1,
        // otherwise the conversion was undefined.
        MVT XLenVT = Subtarget.getXLenVT();
        SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
        SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, InterimIVT,
                                DAG.getUNDEF(InterimIVT), SplatZero, VL);
        Result = DAG.getNode(RISCVISD::SETCC_VL, DL, DstVT, Result, SplatZero,
                             DAG.getCondCode(ISD::SETNE), Mask, VL);
      } else {
        MVT InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
                                          DstVT.getVectorElementCount());

        Result = DAG.getNode(RISCVISDOpc, DL, InterimIVT, Src, Mask, VL);

        while (InterimIVT != DstVT) {
          SrcEltSize /= 2;
          Src = Result;
          InterimIVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize / 2),
                                        DstVT.getVectorElementCount());
          Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, InterimIVT,
                               Src, Mask, VL);
        }
      }
    }
  }

  MVT VT = Op.getSimpleValueType();
  if (!VT.isFixedLengthVector())
    return Result;
  return convertFromScalableVector(VT, Result, DAG, Subtarget);
}

SDValue RISCVTargetLowering::lowerLogicVPOp(SDValue Op, SelectionDAG &DAG,
                                            unsigned MaskOpc,
                                            unsigned VecOpc) const {
  MVT VT = Op.getSimpleValueType();
  if (VT.getVectorElementType() != MVT::i1)
    return lowerVPOp(Op, DAG, VecOpc);

  // It is safe to drop the mask parameter as masked-off elements are undef.
  SDValue Op1 = Op->getOperand(0);
  SDValue Op2 = Op->getOperand(1);
  SDValue VL = Op->getOperand(3);

  MVT ContainerVT = VT;
  const bool IsFixed = VT.isFixedLengthVector();
  if (IsFixed) {
    ContainerVT = getContainerForFixedLengthVector(VT);
    Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
    Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
  }

  SDLoc DL(Op);
  SDValue Val = DAG.getNode(MaskOpc, DL, ContainerVT, Op1, Op2, VL);
  if (!IsFixed)
    return Val;
  return convertFromScalableVector(VT, Val, DAG, Subtarget);
}

// Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
// matched to a RVV indexed load. The RVV indexed load instructions only
// support the "unsigned unscaled" addressing mode; indices are implicitly
// zero-extended or truncated to XLEN and are treated as byte offsets. Any
// signed or scaled indexing is extended to the XLEN value type and scaled
// accordingly.
SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();

  const auto *MemSD = cast<MemSDNode>(Op.getNode());
  EVT MemVT = MemSD->getMemoryVT();
  MachineMemOperand *MMO = MemSD->getMemOperand();
  SDValue Chain = MemSD->getChain();
  SDValue BasePtr = MemSD->getBasePtr();

  ISD::LoadExtType LoadExtType;
  SDValue Index, Mask, PassThru, VL;

  if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
    Index = VPGN->getIndex();
    Mask = VPGN->getMask();
    PassThru = DAG.getUNDEF(VT);
    VL = VPGN->getVectorLength();
    // VP doesn't support extending loads.
6375 LoadExtType = ISD::NON_EXTLOAD; 6376 } else { 6377 // Else it must be a MGATHER. 6378 auto *MGN = cast<MaskedGatherSDNode>(Op.getNode()); 6379 Index = MGN->getIndex(); 6380 Mask = MGN->getMask(); 6381 PassThru = MGN->getPassThru(); 6382 LoadExtType = MGN->getExtensionType(); 6383 } 6384 6385 MVT IndexVT = Index.getSimpleValueType(); 6386 MVT XLenVT = Subtarget.getXLenVT(); 6387 6388 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && 6389 "Unexpected VTs!"); 6390 assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type"); 6391 // Targets have to explicitly opt-in for extending vector loads. 6392 assert(LoadExtType == ISD::NON_EXTLOAD && 6393 "Unexpected extending MGATHER/VP_GATHER"); 6394 (void)LoadExtType; 6395 6396 // If the mask is known to be all ones, optimize to an unmasked intrinsic; 6397 // the selection of the masked intrinsics doesn't do this for us. 6398 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); 6399 6400 MVT ContainerVT = VT; 6401 if (VT.isFixedLengthVector()) { 6402 // We need to use the larger of the result and index type to determine the 6403 // scalable type to use so we don't increase LMUL for any operand/result. 6404 if (VT.bitsGE(IndexVT)) { 6405 ContainerVT = getContainerForFixedLengthVector(VT); 6406 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), 6407 ContainerVT.getVectorElementCount()); 6408 } else { 6409 IndexVT = getContainerForFixedLengthVector(IndexVT); 6410 ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(), 6411 IndexVT.getVectorElementCount()); 6412 } 6413 6414 Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget); 6415 6416 if (!IsUnmasked) { 6417 MVT MaskVT = getMaskTypeFor(ContainerVT); 6418 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 6419 PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget); 6420 } 6421 } 6422 6423 if (!VL) 6424 VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second; 6425 6426 if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) { 6427 IndexVT = IndexVT.changeVectorElementType(XLenVT); 6428 SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(), 6429 VL); 6430 Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index, 6431 TrueMask, VL); 6432 } 6433 6434 unsigned IntID = 6435 IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask; 6436 SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)}; 6437 if (IsUnmasked) 6438 Ops.push_back(DAG.getUNDEF(ContainerVT)); 6439 else 6440 Ops.push_back(PassThru); 6441 Ops.push_back(BasePtr); 6442 Ops.push_back(Index); 6443 if (!IsUnmasked) 6444 Ops.push_back(Mask); 6445 Ops.push_back(VL); 6446 if (!IsUnmasked) 6447 Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT)); 6448 6449 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other}); 6450 SDValue Result = 6451 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO); 6452 Chain = Result.getValue(1); 6453 6454 if (VT.isFixedLengthVector()) 6455 Result = convertFromScalableVector(VT, Result, DAG, Subtarget); 6456 6457 return DAG.getMergeValues({Result, Chain}, DL); 6458 } 6459 6460 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be 6461 // matched to a RVV indexed store. 
The RVV indexed store instructions only 6462 // support the "unsigned unscaled" addressing mode; indices are implicitly 6463 // zero-extended or truncated to XLEN and are treated as byte offsets. Any 6464 // signed or scaled indexing is extended to the XLEN value type and scaled 6465 // accordingly. 6466 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op, 6467 SelectionDAG &DAG) const { 6468 SDLoc DL(Op); 6469 const auto *MemSD = cast<MemSDNode>(Op.getNode()); 6470 EVT MemVT = MemSD->getMemoryVT(); 6471 MachineMemOperand *MMO = MemSD->getMemOperand(); 6472 SDValue Chain = MemSD->getChain(); 6473 SDValue BasePtr = MemSD->getBasePtr(); 6474 6475 bool IsTruncatingStore = false; 6476 SDValue Index, Mask, Val, VL; 6477 6478 if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) { 6479 Index = VPSN->getIndex(); 6480 Mask = VPSN->getMask(); 6481 Val = VPSN->getValue(); 6482 VL = VPSN->getVectorLength(); 6483 // VP doesn't support truncating stores. 6484 IsTruncatingStore = false; 6485 } else { 6486 // Else it must be a MSCATTER. 6487 auto *MSN = cast<MaskedScatterSDNode>(Op.getNode()); 6488 Index = MSN->getIndex(); 6489 Mask = MSN->getMask(); 6490 Val = MSN->getValue(); 6491 IsTruncatingStore = MSN->isTruncatingStore(); 6492 } 6493 6494 MVT VT = Val.getSimpleValueType(); 6495 MVT IndexVT = Index.getSimpleValueType(); 6496 MVT XLenVT = Subtarget.getXLenVT(); 6497 6498 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() && 6499 "Unexpected VTs!"); 6500 assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type"); 6501 // Targets have to explicitly opt-in for extending vector loads and 6502 // truncating vector stores. 6503 assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER"); 6504 (void)IsTruncatingStore; 6505 6506 // If the mask is known to be all ones, optimize to an unmasked intrinsic; 6507 // the selection of the masked intrinsics doesn't do this for us. 6508 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode()); 6509 6510 MVT ContainerVT = VT; 6511 if (VT.isFixedLengthVector()) { 6512 // We need to use the larger of the value and index type to determine the 6513 // scalable type to use so we don't increase LMUL for any operand/result. 6514 if (VT.bitsGE(IndexVT)) { 6515 ContainerVT = getContainerForFixedLengthVector(VT); 6516 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), 6517 ContainerVT.getVectorElementCount()); 6518 } else { 6519 IndexVT = getContainerForFixedLengthVector(IndexVT); 6520 ContainerVT = MVT::getVectorVT(VT.getVectorElementType(), 6521 IndexVT.getVectorElementCount()); 6522 } 6523 6524 Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget); 6525 Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget); 6526 6527 if (!IsUnmasked) { 6528 MVT MaskVT = getMaskTypeFor(ContainerVT); 6529 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget); 6530 } 6531 } 6532 6533 if (!VL) 6534 VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second; 6535 6536 if (XLenVT == MVT::i32 && IndexVT.getVectorElementType().bitsGT(XLenVT)) { 6537 IndexVT = IndexVT.changeVectorElementType(XLenVT); 6538 SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, Mask.getValueType(), 6539 VL); 6540 Index = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, IndexVT, Index, 6541 TrueMask, VL); 6542 } 6543 6544 unsigned IntID = 6545 IsUnmasked ? 
          Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
  SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
  Ops.push_back(Val);
  Ops.push_back(BasePtr);
  Ops.push_back(Index);
  if (!IsUnmasked)
    Ops.push_back(Mask);
  Ops.push_back(VL);

  return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
                                 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
}

SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
                                               SelectionDAG &DAG) const {
  const MVT XLenVT = Subtarget.getXLenVT();
  SDLoc DL(Op);
  SDValue Chain = Op->getOperand(0);
  SDValue SysRegNo = DAG.getTargetConstant(
      RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
  SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
  SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);

  // The encoding used for the rounding mode in RISCV differs from that used
  // in FLT_ROUNDS. To convert it, the RISCV rounding mode is used as an index
  // into a table, which consists of a sequence of 4-bit fields, each
  // representing the corresponding FLT_ROUNDS mode.
  static const int Table =
      (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
      (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
      (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
      (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
      (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);

  SDValue Shift =
      DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
  SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
                                DAG.getConstant(Table, DL, XLenVT), Shift);
  SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
                               DAG.getConstant(7, DL, XLenVT));

  return DAG.getMergeValues({Masked, Chain}, DL);
}

SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
                                               SelectionDAG &DAG) const {
  const MVT XLenVT = Subtarget.getXLenVT();
  SDLoc DL(Op);
  SDValue Chain = Op->getOperand(0);
  SDValue RMValue = Op->getOperand(1);
  SDValue SysRegNo = DAG.getTargetConstant(
      RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);

  // The encoding used for the rounding mode in RISCV differs from that used
  // in FLT_ROUNDS. To convert it, the C rounding mode is used as an index
  // into a table, which consists of a sequence of 4-bit fields, each
  // representing the corresponding RISCV mode.
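  // For example, assuming the usual LLVM enum encodings
  // (RoundingMode::NearestTiesToEven == 1), an RMValue of 1 selects bits
  // [7:4] of the table below, which hold RISCVFPRndMode::RNE.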
  static const unsigned Table =
      (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
      (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
      (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
      (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
      (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));

  SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
                              DAG.getConstant(2, DL, XLenVT));
  SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
                                DAG.getConstant(Table, DL, XLenVT), Shift);
  RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
                        DAG.getConstant(0x7, DL, XLenVT));
  return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
                     RMValue);
}

static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
  switch (IntNo) {
  default:
    llvm_unreachable("Unexpected Intrinsic");
  case Intrinsic::riscv_bcompress:
    return RISCVISD::BCOMPRESSW;
  case Intrinsic::riscv_bdecompress:
    return RISCVISD::BDECOMPRESSW;
  case Intrinsic::riscv_bfp:
    return RISCVISD::BFPW;
  case Intrinsic::riscv_fsl:
    return RISCVISD::FSLW;
  case Intrinsic::riscv_fsr:
    return RISCVISD::FSRW;
  }
}

// Converts the given intrinsic to an i64 operation with any extension.
static SDValue customLegalizeToWOpByIntr(SDNode *N, SelectionDAG &DAG,
                                         unsigned IntNo) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcodeByIntr(IntNo);
  // Deal with the instruction operands.
  SmallVector<SDValue, 3> NewOps;
  for (SDValue Op : drop_begin(N->ops()))
    // Promote the operand to i64 type.
    NewOps.push_back(DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOps);
  // ReplaceNodeResults requires we maintain the same type for the return
  // value.
  return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case ISD::SHL:
    return RISCVISD::SLLW;
  case ISD::SRA:
    return RISCVISD::SRAW;
  case ISD::SRL:
    return RISCVISD::SRLW;
  case ISD::SDIV:
    return RISCVISD::DIVW;
  case ISD::UDIV:
    return RISCVISD::DIVUW;
  case ISD::UREM:
    return RISCVISD::REMUW;
  case ISD::ROTL:
    return RISCVISD::ROLW;
  case ISD::ROTR:
    return RISCVISD::RORW;
  }
}

// Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
// node. Because i8/i16/i32 isn't a legal type for RV64, these operations would
// otherwise be promoted to i64, making it difficult to select the
// SLLW/DIVUW/.../*W later on because the fact that the operation was
// originally of type i8/i16/i32 is lost.
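// For example, (i32 (udiv x, y)) is rewritten here (roughly) as
//   (trunc i32 (divuw (any_extend i64 x), (any_extend i64 y)))
// so that isel can still select DIVUW; this is a sketch of the transform, not
// a literal DAG dump.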
6681 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG, 6682 unsigned ExtOpc = ISD::ANY_EXTEND) { 6683 SDLoc DL(N); 6684 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode()); 6685 SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0)); 6686 SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1)); 6687 SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1); 6688 // ReplaceNodeResults requires we maintain the same type for the return value. 6689 return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes); 6690 } 6691 6692 // Converts the given 32-bit operation to a i64 operation with signed extension 6693 // semantic to reduce the signed extension instructions. 6694 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) { 6695 SDLoc DL(N); 6696 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 6697 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 6698 SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1); 6699 SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp, 6700 DAG.getValueType(MVT::i32)); 6701 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes); 6702 } 6703 6704 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N, 6705 SmallVectorImpl<SDValue> &Results, 6706 SelectionDAG &DAG) const { 6707 SDLoc DL(N); 6708 switch (N->getOpcode()) { 6709 default: 6710 llvm_unreachable("Don't know how to custom type legalize this operation!"); 6711 case ISD::STRICT_FP_TO_SINT: 6712 case ISD::STRICT_FP_TO_UINT: 6713 case ISD::FP_TO_SINT: 6714 case ISD::FP_TO_UINT: { 6715 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 6716 "Unexpected custom legalisation"); 6717 bool IsStrict = N->isStrictFPOpcode(); 6718 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT || 6719 N->getOpcode() == ISD::STRICT_FP_TO_SINT; 6720 SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0); 6721 if (getTypeAction(*DAG.getContext(), Op0.getValueType()) != 6722 TargetLowering::TypeSoftenFloat) { 6723 if (!isTypeLegal(Op0.getValueType())) 6724 return; 6725 if (IsStrict) { 6726 unsigned Opc = IsSigned ? RISCVISD::STRICT_FCVT_W_RV64 6727 : RISCVISD::STRICT_FCVT_WU_RV64; 6728 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other); 6729 SDValue Res = DAG.getNode( 6730 Opc, DL, VTs, N->getOperand(0), Op0, 6731 DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64)); 6732 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 6733 Results.push_back(Res.getValue(1)); 6734 return; 6735 } 6736 unsigned Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64; 6737 SDValue Res = 6738 DAG.getNode(Opc, DL, MVT::i64, Op0, 6739 DAG.getTargetConstant(RISCVFPRndMode::RTZ, DL, MVT::i64)); 6740 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 6741 return; 6742 } 6743 // If the FP type needs to be softened, emit a library call using the 'si' 6744 // version. If we left it to default legalization we'd end up with 'di'. If 6745 // the FP type doesn't need to be softened just let generic type 6746 // legalization promote the result type. 6747 RTLIB::Libcall LC; 6748 if (IsSigned) 6749 LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0)); 6750 else 6751 LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0)); 6752 MakeLibCallOptions CallOptions; 6753 EVT OpVT = Op0.getValueType(); 6754 CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true); 6755 SDValue Chain = IsStrict ? 
N->getOperand(0) : SDValue(); 6756 SDValue Result; 6757 std::tie(Result, Chain) = 6758 makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain); 6759 Results.push_back(Result); 6760 if (IsStrict) 6761 Results.push_back(Chain); 6762 break; 6763 } 6764 case ISD::READCYCLECOUNTER: { 6765 assert(!Subtarget.is64Bit() && 6766 "READCYCLECOUNTER only has custom type legalization on riscv32"); 6767 6768 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other); 6769 SDValue RCW = 6770 DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0)); 6771 6772 Results.push_back( 6773 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1))); 6774 Results.push_back(RCW.getValue(2)); 6775 break; 6776 } 6777 case ISD::MUL: { 6778 unsigned Size = N->getSimpleValueType(0).getSizeInBits(); 6779 unsigned XLen = Subtarget.getXLen(); 6780 // This multiply needs to be expanded, try to use MULHSU+MUL if possible. 6781 if (Size > XLen) { 6782 assert(Size == (XLen * 2) && "Unexpected custom legalisation"); 6783 SDValue LHS = N->getOperand(0); 6784 SDValue RHS = N->getOperand(1); 6785 APInt HighMask = APInt::getHighBitsSet(Size, XLen); 6786 6787 bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask); 6788 bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask); 6789 // We need exactly one side to be unsigned. 6790 if (LHSIsU == RHSIsU) 6791 return; 6792 6793 auto MakeMULPair = [&](SDValue S, SDValue U) { 6794 MVT XLenVT = Subtarget.getXLenVT(); 6795 S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S); 6796 U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U); 6797 SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U); 6798 SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U); 6799 return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi); 6800 }; 6801 6802 bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen; 6803 bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen; 6804 6805 // The other operand should be signed, but still prefer MULH when 6806 // possible. 6807 if (RHSIsU && LHSIsS && !RHSIsS) 6808 Results.push_back(MakeMULPair(LHS, RHS)); 6809 else if (LHSIsU && RHSIsS && !LHSIsS) 6810 Results.push_back(MakeMULPair(RHS, LHS)); 6811 6812 return; 6813 } 6814 LLVM_FALLTHROUGH; 6815 } 6816 case ISD::ADD: 6817 case ISD::SUB: 6818 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 6819 "Unexpected custom legalisation"); 6820 Results.push_back(customLegalizeToWOpWithSExt(N, DAG)); 6821 break; 6822 case ISD::SHL: 6823 case ISD::SRA: 6824 case ISD::SRL: 6825 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 6826 "Unexpected custom legalisation"); 6827 if (N->getOperand(1).getOpcode() != ISD::Constant) { 6828 // If we can use a BSET instruction, allow default promotion to apply. 6829 if (N->getOpcode() == ISD::SHL && Subtarget.hasStdExtZbs() && 6830 isOneConstant(N->getOperand(0))) 6831 break; 6832 Results.push_back(customLegalizeToWOp(N, DAG)); 6833 break; 6834 } 6835 6836 // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is 6837 // similar to customLegalizeToWOpWithSExt, but we must zero_extend the 6838 // shift amount. 
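    // Roughly: (i32 (shl x, y)) becomes
    //   (trunc (sext_inreg (shl (any_extend x), (zero_extend y)), i32))
    // which isel can match as SLLW (illustrative sketch).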
6839 if (N->getOpcode() == ISD::SHL) { 6840 SDLoc DL(N); 6841 SDValue NewOp0 = 6842 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 6843 SDValue NewOp1 = 6844 DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1)); 6845 SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1); 6846 SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp, 6847 DAG.getValueType(MVT::i32)); 6848 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes)); 6849 } 6850 6851 break; 6852 case ISD::ROTL: 6853 case ISD::ROTR: 6854 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 6855 "Unexpected custom legalisation"); 6856 Results.push_back(customLegalizeToWOp(N, DAG)); 6857 break; 6858 case ISD::CTTZ: 6859 case ISD::CTTZ_ZERO_UNDEF: 6860 case ISD::CTLZ: 6861 case ISD::CTLZ_ZERO_UNDEF: { 6862 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 6863 "Unexpected custom legalisation"); 6864 6865 SDValue NewOp0 = 6866 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 6867 bool IsCTZ = 6868 N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF; 6869 unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW; 6870 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0); 6871 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res)); 6872 return; 6873 } 6874 case ISD::SDIV: 6875 case ISD::UDIV: 6876 case ISD::UREM: { 6877 MVT VT = N->getSimpleValueType(0); 6878 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) && 6879 Subtarget.is64Bit() && Subtarget.hasStdExtM() && 6880 "Unexpected custom legalisation"); 6881 // Don't promote division/remainder by constant since we should expand those 6882 // to multiply by magic constant. 6883 // FIXME: What if the expansion is disabled for minsize. 6884 if (N->getOperand(1).getOpcode() == ISD::Constant) 6885 return; 6886 6887 // If the input is i32, use ANY_EXTEND since the W instructions don't read 6888 // the upper 32 bits. For other types we need to sign or zero extend 6889 // based on the opcode. 6890 unsigned ExtOpc = ISD::ANY_EXTEND; 6891 if (VT != MVT::i32) 6892 ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND 6893 : ISD::ZERO_EXTEND; 6894 6895 Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc)); 6896 break; 6897 } 6898 case ISD::UADDO: 6899 case ISD::USUBO: { 6900 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 6901 "Unexpected custom legalisation"); 6902 bool IsAdd = N->getOpcode() == ISD::UADDO; 6903 // Create an ADDW or SUBW. 6904 SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 6905 SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 6906 SDValue Res = 6907 DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS); 6908 Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res, 6909 DAG.getValueType(MVT::i32)); 6910 6911 SDValue Overflow; 6912 if (IsAdd && isOneConstant(RHS)) { 6913 // Special case uaddo X, 1 overflowed if the addition result is 0. 6914 // The general case (X + C) < C is not necessarily beneficial. Although we 6915 // reduce the live range of X, we may introduce the materialization of 6916 // constant C, especially when the setcc result is used by branch. We have 6917 // no compare with constant and branch instructions. 6918 Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, 6919 DAG.getConstant(0, DL, MVT::i64), ISD::SETEQ); 6920 } else { 6921 // Sign extend the LHS and perform an unsigned compare with the ADDW 6922 // result. 
      // Since the inputs are sign extended from i32, this is equivalent
      // to comparing the lower 32 bits.
      LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
      Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
                              IsAdd ? ISD::SETULT : ISD::SETUGT);
    }

    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
    Results.push_back(Overflow);
    return;
  }
  case ISD::UADDSAT:
  case ISD::USUBSAT: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (Subtarget.hasStdExtZbb()) {
      // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
      // sign extend allows overflow of the lower 32 bits to be detected on
      // the promoted size.
      SDValue LHS =
          DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
      SDValue RHS =
          DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      return;
    }

    // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
    // promotion for UADDO/USUBO.
    Results.push_back(expandAddSubSat(N, DAG));
    return;
  }
  case ISD::ABS: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");

    // Expand abs to Y = (sraiw X, 31); subw(xor(X, Y), Y)

    SDValue Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));

    // Freeze the source so we can increase its use count.
    Src = DAG.getFreeze(Src);

    // Copy the sign bit to all bits using the sraiw pattern.
    SDValue SignFill = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Src,
                                   DAG.getValueType(MVT::i32));
    SignFill = DAG.getNode(ISD::SRA, DL, MVT::i64, SignFill,
                           DAG.getConstant(31, DL, MVT::i64));

    SDValue NewRes = DAG.getNode(ISD::XOR, DL, MVT::i64, Src, SignFill);
    NewRes = DAG.getNode(ISD::SUB, DL, MVT::i64, NewRes, SignFill);

    // NOTE: The result is only required to be anyextended, but sext is
    // consistent with type legalization of sub.
    NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewRes,
                         DAG.getValueType(MVT::i32));
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
    return;
  }
  case ISD::BITCAST: {
    EVT VT = N->getValueType(0);
    assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
    SDValue Op0 = N->getOperand(0);
    EVT Op0VT = Op0.getValueType();
    MVT XLenVT = Subtarget.getXLenVT();
    if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
      SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
    } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
               Subtarget.hasStdExtF()) {
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
    } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
               isTypeLegal(Op0VT)) {
      // Custom-legalize bitcasts from fixed-length vector types to illegal
      // scalar types in order to improve codegen.
Bitcast the vector to a 7001 // one-element vector type whose element type is the same as the result 7002 // type, and extract the first element. 7003 EVT BVT = EVT::getVectorVT(*DAG.getContext(), VT, 1); 7004 if (isTypeLegal(BVT)) { 7005 SDValue BVec = DAG.getBitcast(BVT, Op0); 7006 Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec, 7007 DAG.getConstant(0, DL, XLenVT))); 7008 } 7009 } 7010 break; 7011 } 7012 case RISCVISD::GREV: 7013 case RISCVISD::GORC: 7014 case RISCVISD::SHFL: { 7015 MVT VT = N->getSimpleValueType(0); 7016 MVT XLenVT = Subtarget.getXLenVT(); 7017 assert((VT == MVT::i16 || (VT == MVT::i32 && Subtarget.is64Bit())) && 7018 "Unexpected custom legalisation"); 7019 assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant"); 7020 assert((Subtarget.hasStdExtZbp() || 7021 (Subtarget.hasStdExtZbkb() && N->getOpcode() == RISCVISD::GREV && 7022 N->getConstantOperandVal(1) == 7)) && 7023 "Unexpected extension"); 7024 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0)); 7025 SDValue NewOp1 = 7026 DAG.getNode(ISD::ZERO_EXTEND, DL, XLenVT, N->getOperand(1)); 7027 SDValue NewRes = DAG.getNode(N->getOpcode(), DL, XLenVT, NewOp0, NewOp1); 7028 // ReplaceNodeResults requires we maintain the same type for the return 7029 // value. 7030 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, NewRes)); 7031 break; 7032 } 7033 case ISD::BSWAP: 7034 case ISD::BITREVERSE: { 7035 MVT VT = N->getSimpleValueType(0); 7036 MVT XLenVT = Subtarget.getXLenVT(); 7037 assert((VT == MVT::i8 || VT == MVT::i16 || 7038 (VT == MVT::i32 && Subtarget.is64Bit())) && 7039 Subtarget.hasStdExtZbp() && "Unexpected custom legalisation"); 7040 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0)); 7041 unsigned Imm = VT.getSizeInBits() - 1; 7042 // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits. 7043 if (N->getOpcode() == ISD::BSWAP) 7044 Imm &= ~0x7U; 7045 SDValue GREVI = DAG.getNode(RISCVISD::GREV, DL, XLenVT, NewOp0, 7046 DAG.getConstant(Imm, DL, XLenVT)); 7047 // ReplaceNodeResults requires we maintain the same type for the return 7048 // value. 7049 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI)); 7050 break; 7051 } 7052 case ISD::FSHL: 7053 case ISD::FSHR: { 7054 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 7055 Subtarget.hasStdExtZbt() && "Unexpected custom legalisation"); 7056 SDValue NewOp0 = 7057 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0)); 7058 SDValue NewOp1 = 7059 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 7060 SDValue NewShAmt = 7061 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); 7062 // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits. 7063 // Mask the shift amount to 5 bits to prevent accidentally setting bit 5. 7064 NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt, 7065 DAG.getConstant(0x1f, DL, MVT::i64)); 7066 // fshl and fshr concatenate their operands in the same order. fsrw and fslw 7067 // instruction use different orders. fshl will return its first operand for 7068 // shift of zero, fshr will return its second operand. fsl and fsr both 7069 // return rs1 so the ISD nodes need to have different operand orders. 7070 // Shift amount is in rs2. 
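    // So, as a sketch: i32 fshl(a, b, c) becomes (FSLW a, b, c & 31), while
    // i32 fshr(a, b, c) becomes (FSRW b, a, c & 31) after the swap below.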
7071 unsigned Opc = RISCVISD::FSLW; 7072 if (N->getOpcode() == ISD::FSHR) { 7073 std::swap(NewOp0, NewOp1); 7074 Opc = RISCVISD::FSRW; 7075 } 7076 SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt); 7077 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp)); 7078 break; 7079 } 7080 case ISD::EXTRACT_VECTOR_ELT: { 7081 // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element 7082 // type is illegal (currently only vXi64 RV32). 7083 // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are 7084 // transferred to the destination register. We issue two of these from the 7085 // upper- and lower- halves of the SEW-bit vector element, slid down to the 7086 // first element. 7087 SDValue Vec = N->getOperand(0); 7088 SDValue Idx = N->getOperand(1); 7089 7090 // The vector type hasn't been legalized yet so we can't issue target 7091 // specific nodes if it needs legalization. 7092 // FIXME: We would manually legalize if it's important. 7093 if (!isTypeLegal(Vec.getValueType())) 7094 return; 7095 7096 MVT VecVT = Vec.getSimpleValueType(); 7097 7098 assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 && 7099 VecVT.getVectorElementType() == MVT::i64 && 7100 "Unexpected EXTRACT_VECTOR_ELT legalization"); 7101 7102 // If this is a fixed vector, we need to convert it to a scalable vector. 7103 MVT ContainerVT = VecVT; 7104 if (VecVT.isFixedLengthVector()) { 7105 ContainerVT = getContainerForFixedLengthVector(VecVT); 7106 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget); 7107 } 7108 7109 MVT XLenVT = Subtarget.getXLenVT(); 7110 7111 // Use a VL of 1 to avoid processing more elements than we need. 7112 SDValue VL = DAG.getConstant(1, DL, XLenVT); 7113 SDValue Mask = getAllOnesMask(ContainerVT, VL, DL, DAG); 7114 7115 // Unless the index is known to be 0, we must slide the vector down to get 7116 // the desired element into index 0. 7117 if (!isNullConstant(Idx)) { 7118 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT, 7119 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL); 7120 } 7121 7122 // Extract the lower XLEN bits of the correct vector element. 7123 SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec); 7124 7125 // To extract the upper XLEN bits of the vector element, shift the first 7126 // element right by 32 bits and re-extract the lower XLEN bits. 7127 SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, 7128 DAG.getUNDEF(ContainerVT), 7129 DAG.getConstant(32, DL, XLenVT), VL); 7130 SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec, 7131 ThirtyTwoV, Mask, VL); 7132 7133 SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); 7134 7135 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi)); 7136 break; 7137 } 7138 case ISD::INTRINSIC_WO_CHAIN: { 7139 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 7140 switch (IntNo) { 7141 default: 7142 llvm_unreachable( 7143 "Don't know how to custom type legalize this intrinsic!"); 7144 case Intrinsic::riscv_grev: 7145 case Intrinsic::riscv_gorc: { 7146 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() && 7147 "Unexpected custom legalisation"); 7148 SDValue NewOp1 = 7149 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1)); 7150 SDValue NewOp2 = 7151 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2)); 7152 unsigned Opc = 7153 IntNo == Intrinsic::riscv_grev ? 
          RISCVISD::GREVW : RISCVISD::GORCW;
      // If the control is a constant, promote the node by clearing any extra
      // bits in the control. isel will form greviw/gorciw if the result is
      // sign extended.
      if (isa<ConstantSDNode>(NewOp2)) {
        NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
                             DAG.getConstant(0x1f, DL, MVT::i64));
        Opc = IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
      }
      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      break;
    }
    case Intrinsic::riscv_bcompress:
    case Intrinsic::riscv_bdecompress:
    case Intrinsic::riscv_bfp:
    case Intrinsic::riscv_fsl:
    case Intrinsic::riscv_fsr: {
      assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
             "Unexpected custom legalisation");
      Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
      break;
    }
    case Intrinsic::riscv_orc_b: {
      // Lower to the GORCI encoding for orc.b with the operand extended.
      SDValue NewOp =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue Res = DAG.getNode(RISCVISD::GORC, DL, MVT::i64, NewOp,
                                DAG.getConstant(7, DL, MVT::i64));
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      return;
    }
    case Intrinsic::riscv_shfl:
    case Intrinsic::riscv_unshfl: {
      assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
             "Unexpected custom legalisation");
      SDValue NewOp1 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
      SDValue NewOp2 =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
      unsigned Opc =
          IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
      // There is no (UN)SHFLIW. If the control word is a constant, we can use
      // (UN)SHFLI with bit 4 of the control word cleared. The upper 32 bit half
      // will be shuffled the same way as the lower 32 bit half, but the two
      // halves won't cross.
      if (isa<ConstantSDNode>(NewOp2)) {
        NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
                             DAG.getConstant(0xf, DL, MVT::i64));
        Opc =
            IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
      }
      SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
      break;
    }
    case Intrinsic::riscv_vmv_x_s: {
      EVT VT = N->getValueType(0);
      MVT XLenVT = Subtarget.getXLenVT();
      if (VT.bitsLT(XLenVT)) {
        // Simple case: just extract using vmv.x.s and truncate.
        SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
                                      Subtarget.getXLenVT(), N->getOperand(1));
        Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
        return;
      }

      assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
             "Unexpected custom legalization");

      // We need to do the move in two steps.
      SDValue Vec = N->getOperand(1);
      MVT VecVT = Vec.getSimpleValueType();

      // First extract the lower XLEN bits of the element.
      SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);

      // To extract the upper XLEN bits of the vector element, shift the first
      // element right by 32 bits and re-extract the lower XLEN bits.
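      // In RVV assembly this is roughly (illustrative, not a verbatim dump):
      //   vsetivli zero, 1, e64, m1, ta, ma
      //   vsrl.vx v9, v8, a0        # a0 holds 32
      //   vmv.x.s a1, v9            # EltHi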
7232 SDValue VL = DAG.getConstant(1, DL, XLenVT); 7233 SDValue Mask = getAllOnesMask(VecVT, VL, DL, DAG); 7234 7235 SDValue ThirtyTwoV = 7236 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT, DAG.getUNDEF(VecVT), 7237 DAG.getConstant(32, DL, XLenVT), VL); 7238 SDValue LShr32 = 7239 DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL); 7240 SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32); 7241 7242 Results.push_back( 7243 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi)); 7244 break; 7245 } 7246 } 7247 break; 7248 } 7249 case ISD::VECREDUCE_ADD: 7250 case ISD::VECREDUCE_AND: 7251 case ISD::VECREDUCE_OR: 7252 case ISD::VECREDUCE_XOR: 7253 case ISD::VECREDUCE_SMAX: 7254 case ISD::VECREDUCE_UMAX: 7255 case ISD::VECREDUCE_SMIN: 7256 case ISD::VECREDUCE_UMIN: 7257 if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG)) 7258 Results.push_back(V); 7259 break; 7260 case ISD::VP_REDUCE_ADD: 7261 case ISD::VP_REDUCE_AND: 7262 case ISD::VP_REDUCE_OR: 7263 case ISD::VP_REDUCE_XOR: 7264 case ISD::VP_REDUCE_SMAX: 7265 case ISD::VP_REDUCE_UMAX: 7266 case ISD::VP_REDUCE_SMIN: 7267 case ISD::VP_REDUCE_UMIN: 7268 if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG)) 7269 Results.push_back(V); 7270 break; 7271 case ISD::FLT_ROUNDS_: { 7272 SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other); 7273 SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0)); 7274 Results.push_back(Res.getValue(0)); 7275 Results.push_back(Res.getValue(1)); 7276 break; 7277 } 7278 } 7279 } 7280 7281 // A structure to hold one of the bit-manipulation patterns below. Together, a 7282 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source: 7283 // (or (and (shl x, 1), 0xAAAAAAAA), 7284 // (and (srl x, 1), 0x55555555)) 7285 struct RISCVBitmanipPat { 7286 SDValue Op; 7287 unsigned ShAmt; 7288 bool IsSHL; 7289 7290 bool formsPairWith(const RISCVBitmanipPat &Other) const { 7291 return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL; 7292 } 7293 }; 7294 7295 // Matches patterns of the form 7296 // (and (shl x, C2), (C1 << C2)) 7297 // (and (srl x, C2), C1) 7298 // (shl (and x, C1), C2) 7299 // (srl (and x, (C1 << C2)), C2) 7300 // Where C2 is a power of 2 and C1 has at least that many leading zeroes. 7301 // The expected masks for each shift amount are specified in BitmanipMasks where 7302 // BitmanipMasks[log2(C2)] specifies the expected C1 value. 7303 // The max allowed shift amount is either XLen/2 or XLen/4 determined by whether 7304 // BitmanipMasks contains 6 or 5 entries assuming that the maximum possible 7305 // XLen is 64. 7306 static Optional<RISCVBitmanipPat> 7307 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) { 7308 assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) && 7309 "Unexpected number of masks"); 7310 Optional<uint64_t> Mask; 7311 // Optionally consume a mask around the shift operation. 7312 if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) { 7313 Mask = Op.getConstantOperandVal(1); 7314 Op = Op.getOperand(0); 7315 } 7316 if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL) 7317 return None; 7318 bool IsSHL = Op.getOpcode() == ISD::SHL; 7319 7320 if (!isa<ConstantSDNode>(Op.getOperand(1))) 7321 return None; 7322 uint64_t ShAmt = Op.getConstantOperandVal(1); 7323 7324 unsigned Width = Op.getValueType() == MVT::i64 ? 
64 : 32; 7325 if (ShAmt >= Width || !isPowerOf2_64(ShAmt)) 7326 return None; 7327 // If we don't have enough masks for 64 bit, then we must be trying to 7328 // match SHFL so we're only allowed to shift 1/4 of the width. 7329 if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2)) 7330 return None; 7331 7332 SDValue Src = Op.getOperand(0); 7333 7334 // The expected mask is shifted left when the AND is found around SHL 7335 // patterns. 7336 // ((x >> 1) & 0x55555555) 7337 // ((x << 1) & 0xAAAAAAAA) 7338 bool SHLExpMask = IsSHL; 7339 7340 if (!Mask) { 7341 // Sometimes LLVM keeps the mask as an operand of the shift, typically when 7342 // the mask is all ones: consume that now. 7343 if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) { 7344 Mask = Src.getConstantOperandVal(1); 7345 Src = Src.getOperand(0); 7346 // The expected mask is now in fact shifted left for SRL, so reverse the 7347 // decision. 7348 // ((x & 0xAAAAAAAA) >> 1) 7349 // ((x & 0x55555555) << 1) 7350 SHLExpMask = !SHLExpMask; 7351 } else { 7352 // Use a default shifted mask of all-ones if there's no AND, truncated 7353 // down to the expected width. This simplifies the logic later on. 7354 Mask = maskTrailingOnes<uint64_t>(Width); 7355 *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt); 7356 } 7357 } 7358 7359 unsigned MaskIdx = Log2_32(ShAmt); 7360 uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width); 7361 7362 if (SHLExpMask) 7363 ExpMask <<= ShAmt; 7364 7365 if (Mask != ExpMask) 7366 return None; 7367 7368 return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL}; 7369 } 7370 7371 // Matches any of the following bit-manipulation patterns: 7372 // (and (shl x, 1), (0x55555555 << 1)) 7373 // (and (srl x, 1), 0x55555555) 7374 // (shl (and x, 0x55555555), 1) 7375 // (srl (and x, (0x55555555 << 1)), 1) 7376 // where the shift amount and mask may vary thus: 7377 // [1] = 0x55555555 / 0xAAAAAAAA 7378 // [2] = 0x33333333 / 0xCCCCCCCC 7379 // [4] = 0x0F0F0F0F / 0xF0F0F0F0 7380 // [8] = 0x00FF00FF / 0xFF00FF00 7381 // [16] = 0x0000FFFF / 0xFFFF0000 7382 // [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64) 7383 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) { 7384 // These are the unshifted masks which we use to match bit-manipulation 7385 // patterns. They may be shifted left in certain circumstances.
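// For illustration: with ShAmt == 1 on 32 bits,
//   (or (and (shl x, 1), 0xAAAAAAAA), (and (srl x, 1), 0x55555555))
// swaps every even/odd bit pair, which is exactly (grev x, 1); the masks
// below are the [1] entry truncated to the operand width.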
7386 static const uint64_t BitmanipMasks[] = { 7387 0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL, 7388 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL}; 7389 7390 return matchRISCVBitmanipPat(Op, BitmanipMasks); 7391 } 7392 7393 // Try to fold (<bop> x, (reduction.<bop> vec, start)) 7394 static SDValue combineBinOpToReduce(SDNode *N, SelectionDAG &DAG) { 7395 auto BinOpToRVVReduce = [](unsigned Opc) { 7396 switch (Opc) { 7397 default: 7398 llvm_unreachable("Unhandled binary to transform reduction"); 7399 case ISD::ADD: 7400 return RISCVISD::VECREDUCE_ADD_VL; 7401 case ISD::UMAX: 7402 return RISCVISD::VECREDUCE_UMAX_VL; 7403 case ISD::SMAX: 7404 return RISCVISD::VECREDUCE_SMAX_VL; 7405 case ISD::UMIN: 7406 return RISCVISD::VECREDUCE_UMIN_VL; 7407 case ISD::SMIN: 7408 return RISCVISD::VECREDUCE_SMIN_VL; 7409 case ISD::AND: 7410 return RISCVISD::VECREDUCE_AND_VL; 7411 case ISD::OR: 7412 return RISCVISD::VECREDUCE_OR_VL; 7413 case ISD::XOR: 7414 return RISCVISD::VECREDUCE_XOR_VL; 7415 case ISD::FADD: 7416 return RISCVISD::VECREDUCE_FADD_VL; 7417 case ISD::FMAXNUM: 7418 return RISCVISD::VECREDUCE_FMAX_VL; 7419 case ISD::FMINNUM: 7420 return RISCVISD::VECREDUCE_FMIN_VL; 7421 } 7422 }; 7423 7424 auto IsReduction = [&BinOpToRVVReduce](SDValue V, unsigned Opc) { 7425 return V.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 7426 isNullConstant(V.getOperand(1)) && 7427 V.getOperand(0).getOpcode() == BinOpToRVVReduce(Opc); 7428 }; 7429 7430 unsigned Opc = N->getOpcode(); 7431 unsigned ReduceIdx; 7432 if (IsReduction(N->getOperand(0), Opc)) 7433 ReduceIdx = 0; 7434 else if (IsReduction(N->getOperand(1), Opc)) 7435 ReduceIdx = 1; 7436 else 7437 return SDValue(); 7438 7439 // Skip if FADD disallows the reassociation that this combine requires. 7440 if (Opc == ISD::FADD && !N->getFlags().hasAllowReassociation()) 7441 return SDValue(); 7442 7443 SDValue Extract = N->getOperand(ReduceIdx); 7444 SDValue Reduce = Extract.getOperand(0); 7445 if (!Reduce.hasOneUse()) 7446 return SDValue(); 7447 7448 SDValue ScalarV = Reduce.getOperand(2); 7449 7450 // Make sure that ScalarV is a splat with VL=1. 7451 if (ScalarV.getOpcode() != RISCVISD::VFMV_S_F_VL && 7452 ScalarV.getOpcode() != RISCVISD::VMV_S_X_VL && 7453 ScalarV.getOpcode() != RISCVISD::VMV_V_X_VL) 7454 return SDValue(); 7455 7456 if (!isOneConstant(ScalarV.getOperand(2))) 7457 return SDValue(); 7458 7459 // TODO: Deal with values other than the neutral element.
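// For example (illustrative, simplified operand lists): with Opc == ISD::ADD
// the neutral element is 0, so roughly
//   (add x, (extractelt (vecreduce_add_vl vec, (splat 0)), 0))
// can instead seed the reduction with x:
//   (extractelt (vecreduce_add_vl vec, (splat x)), 0)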
7460 auto IsRVVNeutralElement = [Opc, &DAG](SDNode *N, SDValue V) { 7461 if (Opc == ISD::FADD && N->getFlags().hasNoSignedZeros() && 7462 isNullFPConstant(V)) 7463 return true; 7464 return DAG.getNeutralElement(Opc, SDLoc(V), V.getSimpleValueType(), 7465 N->getFlags()) == V; 7466 }; 7467 7468 // Check that the scalar of ScalarV is the neutral element. 7469 if (!IsRVVNeutralElement(N, ScalarV.getOperand(1))) 7470 return SDValue(); 7471 7472 if (!ScalarV.hasOneUse()) 7473 return SDValue(); 7474 7475 EVT SplatVT = ScalarV.getValueType(); 7476 SDValue NewStart = N->getOperand(1 - ReduceIdx); 7477 unsigned SplatOpc = RISCVISD::VFMV_S_F_VL; 7478 if (SplatVT.isInteger()) { 7479 auto *C = dyn_cast<ConstantSDNode>(NewStart.getNode()); 7480 if (!C || C->isZero() || !isInt<5>(C->getSExtValue())) 7481 SplatOpc = RISCVISD::VMV_S_X_VL; 7482 else 7483 SplatOpc = RISCVISD::VMV_V_X_VL; 7484 } 7485 7486 SDValue NewScalarV = 7487 DAG.getNode(SplatOpc, SDLoc(N), SplatVT, ScalarV.getOperand(0), NewStart, 7488 ScalarV.getOperand(2)); 7489 SDValue NewReduce = 7490 DAG.getNode(Reduce.getOpcode(), SDLoc(Reduce), Reduce.getValueType(), 7491 Reduce.getOperand(0), Reduce.getOperand(1), NewScalarV, 7492 Reduce.getOperand(3), Reduce.getOperand(4)); 7493 return DAG.getNode(Extract.getOpcode(), SDLoc(Extract), 7494 Extract.getValueType(), NewReduce, Extract.getOperand(1)); 7495 } 7496 7497 // Match the following pattern as a GREVI(W) operation 7498 // (or (BITMANIP_SHL x), (BITMANIP_SRL x)) 7499 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG, 7500 const RISCVSubtarget &Subtarget) { 7501 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension"); 7502 EVT VT = Op.getValueType(); 7503 7504 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) { 7505 auto LHS = matchGREVIPat(Op.getOperand(0)); 7506 auto RHS = matchGREVIPat(Op.getOperand(1)); 7507 if (LHS && RHS && LHS->formsPairWith(*RHS)) { 7508 SDLoc DL(Op); 7509 return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op, 7510 DAG.getConstant(LHS->ShAmt, DL, VT)); 7511 } 7512 } 7513 return SDValue(); 7514 } 7515 7516 // Matches any of the following patterns as a GORCI(W) operation 7517 // 1. (or (GREVI x, shamt), x) if shamt is a power of 2 7518 // 2. (or x, (GREVI x, shamt)) if shamt is a power of 2 7519 // 3. (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x)) 7520 // Note that with the variant of 3., 7521 // (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x) 7522 // the inner pattern will first be matched as GREVI and then the outer 7523 // pattern will be matched to GORC via the first rule above. 7524 // 4. (or (rotl/rotr x, bitwidth/2), x) 7525 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG, 7526 const RISCVSubtarget &Subtarget) { 7527 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension"); 7528 EVT VT = Op.getValueType(); 7529 7530 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) { 7531 SDLoc DL(Op); 7532 SDValue Op0 = Op.getOperand(0); 7533 SDValue Op1 = Op.getOperand(1); 7534 7535 auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) { 7536 if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X && 7537 isa<ConstantSDNode>(Reverse.getOperand(1)) && 7538 isPowerOf2_32(Reverse.getConstantOperandVal(1))) 7539 return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1)); 7540 // We can also form GORCI from ROTL/ROTR by half the bitwidth.
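// e.g. (illustrative, 32 bits): (or (rotl x, 16), x) == (gorc x, 16), since
// for a rotate by half the width (x << 16) | (x >> 16) == rotl(x, 16), so
// OR'ing with x reproduces the gorc stage x | (x << 16) | (x >> 16).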
7541 if ((Reverse.getOpcode() == ISD::ROTL || 7542 Reverse.getOpcode() == ISD::ROTR) && 7543 Reverse.getOperand(0) == X && 7544 isa<ConstantSDNode>(Reverse.getOperand(1))) { 7545 uint64_t RotAmt = Reverse.getConstantOperandVal(1); 7546 if (RotAmt == (VT.getSizeInBits() / 2)) 7547 return DAG.getNode(RISCVISD::GORC, DL, VT, X, 7548 DAG.getConstant(RotAmt, DL, VT)); 7549 } 7550 return SDValue(); 7551 }; 7552 7553 // Check for either commutable permutation of (or (GREVI x, shamt), x) 7554 if (SDValue V = MatchOROfReverse(Op0, Op1)) 7555 return V; 7556 if (SDValue V = MatchOROfReverse(Op1, Op0)) 7557 return V; 7558 7559 // OR is commutable so canonicalize its OR operand to the left 7560 if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR) 7561 std::swap(Op0, Op1); 7562 if (Op0.getOpcode() != ISD::OR) 7563 return SDValue(); 7564 SDValue OrOp0 = Op0.getOperand(0); 7565 SDValue OrOp1 = Op0.getOperand(1); 7566 auto LHS = matchGREVIPat(OrOp0); 7567 // OR is commutable so swap the operands and try again: x might have been 7568 // on the left 7569 if (!LHS) { 7570 std::swap(OrOp0, OrOp1); 7571 LHS = matchGREVIPat(OrOp0); 7572 } 7573 auto RHS = matchGREVIPat(Op1); 7574 if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) { 7575 return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op, 7576 DAG.getConstant(LHS->ShAmt, DL, VT)); 7577 } 7578 } 7579 return SDValue(); 7580 } 7581 7582 // Matches any of the following bit-manipulation patterns: 7583 // (and (shl x, 1), (0x22222222 << 1)) 7584 // (and (srl x, 1), 0x22222222) 7585 // (shl (and x, 0x22222222), 1) 7586 // (srl (and x, (0x22222222 << 1)), 1) 7587 // where the shift amount and mask may vary thus: 7588 // [1] = 0x22222222 / 0x44444444 7589 // [2] = 0x0C0C0C0C / 0x30303030 7590 // [4] = 0x00F000F0 / 0x0F000F00 7591 // [8] = 0x0000FF00 / 0x00FF0000 7592 // [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64) 7593 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) { 7594 // These are the unshifted masks which we use to match bit-manipulation 7595 // patterns. They may be shifted left in certain circumstances. 7596 static const uint64_t BitmanipMasks[] = { 7597 0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL, 7598 0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL}; 7599 7600 return matchRISCVBitmanipPat(Op, BitmanipMasks); 7601 } 7602 7603 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x)) 7604 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG, 7605 const RISCVSubtarget &Subtarget) { 7606 assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension"); 7607 EVT VT = Op.getValueType(); 7608 7609 if (VT != MVT::i32 && VT != Subtarget.getXLenVT()) 7610 return SDValue(); 7611 7612 SDValue Op0 = Op.getOperand(0); 7613 SDValue Op1 = Op.getOperand(1); 7614 7615 // Or is commutable so canonicalize the second OR to the LHS. 7616 if (Op0.getOpcode() != ISD::OR) 7617 std::swap(Op0, Op1); 7618 if (Op0.getOpcode() != ISD::OR) 7619 return SDValue(); 7620 7621 // We found an inner OR, so our operands are the operands of the inner OR 7622 // and the other operand of the outer OR. 7623 SDValue A = Op0.getOperand(0); 7624 SDValue B = Op0.getOperand(1); 7625 SDValue C = Op1; 7626 7627 auto Match1 = matchSHFLPat(A); 7628 auto Match2 = matchSHFLPat(B); 7629 7630 // If neither matched, we failed. 7631 if (!Match1 && !Match2) 7632 return SDValue(); 7633 7634 // We had at least one match. If one failed, try the remaining C operand.
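// Worked example (illustrative, 32 bits, ShAmt == 8):
//   A = (and (shl x, 8), 0x00FF0000)
//   B = (and (srl x, 8), 0x0000FF00)
//   C = (and x, 0xFF0000FF)
// (or (or A, B), C) swaps the two middle bytes, which is (shfl x, 8).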
7635 if (!Match1) { 7636 std::swap(A, C); 7637 Match1 = matchSHFLPat(A); 7638 if (!Match1) 7639 return SDValue(); 7640 } else if (!Match2) { 7641 std::swap(B, C); 7642 Match2 = matchSHFLPat(B); 7643 if (!Match2) 7644 return SDValue(); 7645 } 7646 assert(Match1 && Match2); 7647 7648 // Make sure our matches pair up. 7649 if (!Match1->formsPairWith(*Match2)) 7650 return SDValue(); 7651 7652 // All that remains is to make sure C is an AND with the same input that masks 7653 // out the bits that are being shuffled. 7654 if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) || 7655 C.getOperand(0) != Match1->Op) 7656 return SDValue(); 7657 7658 uint64_t Mask = C.getConstantOperandVal(1); 7659 7660 static const uint64_t BitmanipMasks[] = { 7661 0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL, 7662 0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL, 7663 }; 7664 7665 unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32; 7666 unsigned MaskIdx = Log2_32(Match1->ShAmt); 7667 uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width); 7668 7669 if (Mask != ExpMask) 7670 return SDValue(); 7671 7672 SDLoc DL(Op); 7673 return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op, 7674 DAG.getConstant(Match1->ShAmt, DL, VT)); 7675 } 7676 7677 // Optimize (add (shl x, c0), (shl y, c1)) -> 7678 // (SLLI (SH*ADD x, y), c0), if c1-c0 equals [1|2|3]. 7679 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG, 7680 const RISCVSubtarget &Subtarget) { 7681 // Perform this optimization only in the zba extension. 7682 if (!Subtarget.hasStdExtZba()) 7683 return SDValue(); 7684 7685 // Skip for vector types and larger types. 7686 EVT VT = N->getValueType(0); 7687 if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen()) 7688 return SDValue(); 7689 7690 // The two operand nodes must be SHL and have no other use. 7691 SDValue N0 = N->getOperand(0); 7692 SDValue N1 = N->getOperand(1); 7693 if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL || 7694 !N0->hasOneUse() || !N1->hasOneUse()) 7695 return SDValue(); 7696 7697 // Check c0 and c1. 7698 auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 7699 auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1)); 7700 if (!N0C || !N1C) 7701 return SDValue(); 7702 int64_t C0 = N0C->getSExtValue(); 7703 int64_t C1 = N1C->getSExtValue(); 7704 if (C0 <= 0 || C1 <= 0) 7705 return SDValue(); 7706 7707 // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable. 7708 int64_t Bits = std::min(C0, C1); 7709 int64_t Diff = std::abs(C0 - C1); 7710 if (Diff != 1 && Diff != 2 && Diff != 3) 7711 return SDValue(); 7712 7713 // Build nodes. 7714 SDLoc DL(N); 7715 SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0); 7716 SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0); 7717 SDValue NA0 = 7718 DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT)); 7719 SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS); 7720 return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT)); 7721 } 7722 7723 // Combine 7724 // ROTR ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32 7725 // ROTL ((GREVI x, 24), 16) -> (GREVI x, 8) for RV32 7726 // ROTR ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64 7727 // ROTL ((GREVI x, 56), 32) -> (GREVI x, 24) for RV64 7728 // RORW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64 7729 // ROLW ((GREVI x, 24), 16) -> (GREVIW x, 8) for RV64 7730 // The grev patterns represent BSWAP. 7731 // FIXME: This can be generalized to any GREV.
We just need to toggle the MSB 7732 // of the grev shift amount. 7733 static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG, 7734 const RISCVSubtarget &Subtarget) { 7735 bool IsWInstruction = 7736 N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW; 7737 assert((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL || 7738 IsWInstruction) && 7739 "Unexpected opcode!"); 7740 SDValue Src = N->getOperand(0); 7741 EVT VT = N->getValueType(0); 7742 SDLoc DL(N); 7743 7744 if (!Subtarget.hasStdExtZbp() || Src.getOpcode() != RISCVISD::GREV) 7745 return SDValue(); 7746 7747 if (!isa<ConstantSDNode>(N->getOperand(1)) || 7748 !isa<ConstantSDNode>(Src.getOperand(1))) 7749 return SDValue(); 7750 7751 unsigned BitWidth = IsWInstruction ? 32 : VT.getSizeInBits(); 7752 assert(isPowerOf2_32(BitWidth) && "Expected a power of 2"); 7753 7754 // Needs to be a rotate by half the bitwidth for ROTR/ROTL or by 16 for 7755 // RORW/ROLW. And the grev should be the encoding for bswap for this width. 7756 unsigned ShAmt1 = N->getConstantOperandVal(1); 7757 unsigned ShAmt2 = Src.getConstantOperandVal(1); 7758 if (BitWidth < 32 || ShAmt1 != (BitWidth / 2) || ShAmt2 != (BitWidth - 8)) 7759 return SDValue(); 7760 7761 Src = Src.getOperand(0); 7762 7763 // Toggle the MSB of the grev shift amount. 7764 unsigned CombinedShAmt = ShAmt1 ^ ShAmt2; 7765 if (CombinedShAmt == 0) 7766 return Src; 7767 7768 SDValue Res = DAG.getNode( 7769 RISCVISD::GREV, DL, VT, Src, 7770 DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType())); 7771 if (!IsWInstruction) 7772 return Res; 7773 7774 // Sign extend the result to match the behavior of the rotate. This will be 7775 // selected to GREVIW in isel. 7776 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Res, 7777 DAG.getValueType(MVT::i32)); 7778 } 7779 7780 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is 7781 // non-zero, and to x when it is zero. Any repeated GREVI stage undoes itself. 7782 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). A repeated stage 7783 // does not undo itself, but it is redundant.
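// e.g. (illustrative): (grevi (grevi x, 24), 8) == (grevi x, 16), because
// 24 ^ 8 == 16: the byte-swap-within-halfword stage (8) occurs twice and
// cancels, leaving only the halfword swap (16).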
7784 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) { 7785 bool IsGORC = N->getOpcode() == RISCVISD::GORC; 7786 assert((IsGORC || N->getOpcode() == RISCVISD::GREV) && "Unexpected opcode"); 7787 SDValue Src = N->getOperand(0); 7788 7789 if (Src.getOpcode() != N->getOpcode()) 7790 return SDValue(); 7791 7792 if (!isa<ConstantSDNode>(N->getOperand(1)) || 7793 !isa<ConstantSDNode>(Src.getOperand(1))) 7794 return SDValue(); 7795 7796 unsigned ShAmt1 = N->getConstantOperandVal(1); 7797 unsigned ShAmt2 = Src.getConstantOperandVal(1); 7798 Src = Src.getOperand(0); 7799 7800 unsigned CombinedShAmt; 7801 if (IsGORC) 7802 CombinedShAmt = ShAmt1 | ShAmt2; 7803 else 7804 CombinedShAmt = ShAmt1 ^ ShAmt2; 7805 7806 if (CombinedShAmt == 0) 7807 return Src; 7808 7809 SDLoc DL(N); 7810 return DAG.getNode( 7811 N->getOpcode(), DL, N->getValueType(0), Src, 7812 DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType())); 7813 } 7814 7815 // Combine a constant select operand into its use: 7816 // 7817 // (and (select cond, -1, c), x) 7818 // -> (select cond, x, (and x, c)) [AllOnes=1] 7819 // (or (select cond, 0, c), x) 7820 // -> (select cond, x, (or x, c)) [AllOnes=0] 7821 // (xor (select cond, 0, c), x) 7822 // -> (select cond, x, (xor x, c)) [AllOnes=0] 7823 // (add (select cond, 0, c), x) 7824 // -> (select cond, x, (add x, c)) [AllOnes=0] 7825 // (sub x, (select cond, 0, c)) 7826 // -> (select cond, x, (sub x, c)) [AllOnes=0] 7827 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp, 7828 SelectionDAG &DAG, bool AllOnes) { 7829 EVT VT = N->getValueType(0); 7830 7831 // Skip vectors. 7832 if (VT.isVector()) 7833 return SDValue(); 7834 7835 if ((Slct.getOpcode() != ISD::SELECT && 7836 Slct.getOpcode() != RISCVISD::SELECT_CC) || 7837 !Slct.hasOneUse()) 7838 return SDValue(); 7839 7840 auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) { 7841 return AllOnes ? isAllOnesConstant(N) : isNullConstant(N); 7842 }; 7843 7844 bool SwapSelectOps; 7845 unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0; 7846 SDValue TrueVal = Slct.getOperand(1 + OpOffset); 7847 SDValue FalseVal = Slct.getOperand(2 + OpOffset); 7848 SDValue NonConstantVal; 7849 if (isZeroOrAllOnes(TrueVal, AllOnes)) { 7850 SwapSelectOps = false; 7851 NonConstantVal = FalseVal; 7852 } else if (isZeroOrAllOnes(FalseVal, AllOnes)) { 7853 SwapSelectOps = true; 7854 NonConstantVal = TrueVal; 7855 } else 7856 return SDValue(); 7857 7858 // Slct is now known to be the desired identity constant when CC is true. 7859 TrueVal = OtherOp; 7860 FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal); 7861 // Unless SwapSelectOps says the condition should be false. 7862 if (SwapSelectOps) 7863 std::swap(TrueVal, FalseVal); 7864 7865 if (Slct.getOpcode() == RISCVISD::SELECT_CC) 7866 return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT, 7867 {Slct.getOperand(0), Slct.getOperand(1), 7868 Slct.getOperand(2), TrueVal, FalseVal}); 7869 7870 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, 7871 {Slct.getOperand(0), TrueVal, FalseVal}); 7872 } 7873 7874 // Attempt combineSelectAndUse on each operand of a commutative operator N.
7875 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG, 7876 bool AllOnes) { 7877 SDValue N0 = N->getOperand(0); 7878 SDValue N1 = N->getOperand(1); 7879 if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes)) 7880 return Result; 7881 if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes)) 7882 return Result; 7883 return SDValue(); 7884 } 7885 7886 // Transform (add (mul x, c0), c1) -> 7887 // (add (mul (add x, c1/c0), c0), c1%c0). 7888 // if c1/c0 and c1%c0 are simm12, while c1 is not. A special corner case 7889 // that should be excluded is when c0*(c1/c0) is simm12, which will lead 7890 // to an infinite loop in DAGCombine if transformed. 7891 // Or transform (add (mul x, c0), c1) -> 7892 // (add (mul (add x, c1/c0+1), c0), c1%c0-c0), 7893 // if c1/c0+1 and c1%c0-c0 are simm12, while c1 is not. A special corner 7894 // case that should be excluded is when c0*(c1/c0+1) is simm12, which will 7895 // lead to an infinite loop in DAGCombine if transformed. 7896 // Or transform (add (mul x, c0), c1) -> 7897 // (add (mul (add x, c1/c0-1), c0), c1%c0+c0), 7898 // if c1/c0-1 and c1%c0+c0 are simm12, while c1 is not. A special corner 7899 // case that should be excluded is when c0*(c1/c0-1) is simm12, which will 7900 // lead to an infinite loop in DAGCombine if transformed. 7901 // Or transform (add (mul x, c0), c1) -> 7902 // (mul (add x, c1/c0), c0). 7903 // if c1%c0 is zero, and c1/c0 is simm12 while c1 is not. 7904 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG, 7905 const RISCVSubtarget &Subtarget) { 7906 // Skip for vector types and larger types. 7907 EVT VT = N->getValueType(0); 7908 if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen()) 7909 return SDValue(); 7910 // The first operand node must be a MUL and have no other use. 7911 SDValue N0 = N->getOperand(0); 7912 if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL) 7913 return SDValue(); 7914 // Check if c0 and c1 match the above conditions. 7915 auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 7916 auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 7917 if (!N0C || !N1C) 7918 return SDValue(); 7919 // If N0C has multiple uses it's possible one of the cases in 7920 // DAGCombiner::isMulAddWithConstProfitable will be true, which would result 7921 // in an infinite loop. 7922 if (!N0C->hasOneUse()) 7923 return SDValue(); 7924 int64_t C0 = N0C->getSExtValue(); 7925 int64_t C1 = N1C->getSExtValue(); 7926 int64_t CA, CB; 7927 if (C0 == -1 || C0 == 0 || C0 == 1 || isInt<12>(C1)) 7928 return SDValue(); 7929 // Search for a proper CA (non-zero) and CB such that both are simm12. 7930 if ((C1 / C0) != 0 && isInt<12>(C1 / C0) && isInt<12>(C1 % C0) && 7931 !isInt<12>(C0 * (C1 / C0))) { 7932 CA = C1 / C0; 7933 CB = C1 % C0; 7934 } else if ((C1 / C0 + 1) != 0 && isInt<12>(C1 / C0 + 1) && 7935 isInt<12>(C1 % C0 - C0) && !isInt<12>(C0 * (C1 / C0 + 1))) { 7936 CA = C1 / C0 + 1; 7937 CB = C1 % C0 - C0; 7938 } else if ((C1 / C0 - 1) != 0 && isInt<12>(C1 / C0 - 1) && 7939 isInt<12>(C1 % C0 + C0) && !isInt<12>(C0 * (C1 / C0 - 1))) { 7940 CA = C1 / C0 - 1; 7941 CB = C1 % C0 + C0; 7942 } else 7943 return SDValue(); 7944 // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
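// Worked example (illustrative): (add (mul x, 100), 4098). Here c1 = 4098 is
// not simm12, but c1/c0 = 40 and c1%c0 = 98 both are, and c0*(c1/c0) = 4000
// is not simm12, so the first rule applies and yields
// (add (mul (add x, 40), 100), 98).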
7945 SDLoc DL(N); 7946 SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0), 7947 DAG.getConstant(CA, DL, VT)); 7948 SDValue New1 = 7949 DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT)); 7950 return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(CB, DL, VT)); 7951 } 7952 7953 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG, 7954 const RISCVSubtarget &Subtarget) { 7955 if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget)) 7956 return V; 7957 if (SDValue V = transformAddShlImm(N, DAG, Subtarget)) 7958 return V; 7959 if (SDValue V = combineBinOpToReduce(N, DAG)) 7960 return V; 7961 // fold (add (select lhs, rhs, cc, 0, y), x) -> 7962 // (select lhs, rhs, cc, x, (add x, y)) 7963 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false); 7964 } 7965 7966 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) { 7967 // fold (sub x, (select lhs, rhs, cc, 0, y)) -> 7968 // (select lhs, rhs, cc, x, (sub x, y)) 7969 SDValue N0 = N->getOperand(0); 7970 SDValue N1 = N->getOperand(1); 7971 return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false); 7972 } 7973 7974 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG, 7975 const RISCVSubtarget &Subtarget) { 7976 SDValue N0 = N->getOperand(0); 7977 // Pre-promote (i32 (and (srl X, Y), 1)) on RV64 with Zbs without zero 7978 // extending X. This is safe since we only need the LSB after the shift and 7979 // shift amounts larger than 31 would produce poison. If we wait until 7980 // type legalization, we'll create RISCVISD::SRLW and we can't recover it 7981 // to use a BEXT instruction. 7982 if (Subtarget.is64Bit() && Subtarget.hasStdExtZbs() && 7983 N->getValueType(0) == MVT::i32 && isOneConstant(N->getOperand(1)) && 7984 N0.getOpcode() == ISD::SRL && !isa<ConstantSDNode>(N0.getOperand(1)) && 7985 N0.hasOneUse()) { 7986 SDLoc DL(N); 7987 SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N0.getOperand(0)); 7988 SDValue Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N0.getOperand(1)); 7989 SDValue Srl = DAG.getNode(ISD::SRL, DL, MVT::i64, Op0, Op1); 7990 SDValue And = DAG.getNode(ISD::AND, DL, MVT::i64, Srl, 7991 DAG.getConstant(1, DL, MVT::i64)); 7992 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, And); 7993 } 7994 7995 if (SDValue V = combineBinOpToReduce(N, DAG)) 7996 return V; 7997 7998 // fold (and (select lhs, rhs, cc, -1, y), x) -> 7999 // (select lhs, rhs, cc, x, (and x, y)) 8000 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true); 8001 } 8002 8003 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG, 8004 const RISCVSubtarget &Subtarget) { 8005 if (Subtarget.hasStdExtZbp()) { 8006 if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget)) 8007 return GREV; 8008 if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget)) 8009 return GORC; 8010 if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget)) 8011 return SHFL; 8012 } 8013 8014 if (SDValue V = combineBinOpToReduce(N, DAG)) 8015 return V; 8016 // fold (or (select cond, 0, y), x) -> 8017 // (select cond, x, (or x, y)) 8018 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false); 8019 } 8020 8021 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) { 8022 SDValue N0 = N->getOperand(0); 8023 SDValue N1 = N->getOperand(1); 8024 8025 // fold (xor (sllw 1, x), -1) -> (rolw ~1, x) 8026 // NOTE: Assumes ROL being legal means ROLW is legal. 
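// i.e. (illustrative): ~(1 << x) has a single zero bit at position x, and
// rotating ~1 (0xFFFFFFFE in 32 bits) left by x places its only zero bit at
// the same position, so one rolw replaces the sllw+xor pair.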
8027 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8028 if (N0.getOpcode() == RISCVISD::SLLW && 8029 isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0)) && 8030 TLI.isOperationLegal(ISD::ROTL, MVT::i64)) { 8031 SDLoc DL(N); 8032 return DAG.getNode(RISCVISD::ROLW, DL, MVT::i64, 8033 DAG.getConstant(~1, DL, MVT::i64), N0.getOperand(1)); 8034 } 8035 8036 if (SDValue V = combineBinOpToReduce(N, DAG)) 8037 return V; 8038 // fold (xor (select cond, 0, y), x) -> 8039 // (select cond, x, (xor x, y)) 8040 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false); 8041 } 8042 8043 static SDValue 8044 performSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG, 8045 const RISCVSubtarget &Subtarget) { 8046 SDValue Src = N->getOperand(0); 8047 EVT VT = N->getValueType(0); 8048 8049 // Fold (sext_inreg (fmv_x_anyexth X), i16) -> (fmv_x_signexth X) 8050 if (Src.getOpcode() == RISCVISD::FMV_X_ANYEXTH && 8051 cast<VTSDNode>(N->getOperand(1))->getVT().bitsGE(MVT::i16)) 8052 return DAG.getNode(RISCVISD::FMV_X_SIGNEXTH, SDLoc(N), VT, 8053 Src.getOperand(0)); 8054 8055 // Fold (i64 (sext_inreg (abs X), i32)) -> 8056 // (i64 (smax (sext_inreg (neg X), i32), X)) if X has more than 32 sign bits. 8057 // The (sext_inreg (neg X), i32) will be selected to negw by isel. This 8058 // pattern occurs after type legalization of (i32 (abs X)) on RV64 if the user 8059 // of the (i32 (abs X)) is a sext or setcc or something else that causes type 8060 // legalization to add a sext_inreg after the abs. The (i32 (abs X)) will have 8061 // been type legalized to (i64 (abs (sext_inreg X, i32))), but the sext_inreg 8062 // may get combined into an earlier operation so we need to use 8063 // ComputeNumSignBits. 8064 // NOTE: (i64 (sext_inreg (abs X), i32)) can also be created for 8065 // (i64 (ashr (shl (abs X), 32), 32)) without any type legalization so 8066 // we can't assume that X has 33 sign bits. We must check. 8067 if (Subtarget.hasStdExtZbb() && Subtarget.is64Bit() && 8068 Src.getOpcode() == ISD::ABS && Src.hasOneUse() && VT == MVT::i64 && 8069 cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32 && 8070 DAG.ComputeNumSignBits(Src.getOperand(0)) > 32) { 8071 SDLoc DL(N); 8072 SDValue Freeze = DAG.getFreeze(Src.getOperand(0)); 8073 SDValue Neg = 8074 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, MVT::i64), Freeze); 8075 Neg = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Neg, 8076 DAG.getValueType(MVT::i32)); 8077 return DAG.getNode(ISD::SMAX, DL, MVT::i64, Freeze, Neg); 8078 } 8079 8080 return SDValue(); 8081 } 8082 8083 // Try to form vwadd(u).wv/wx or vwsub(u).wv/wx. It might later be optimized to 8084 // vwadd(u).vv/vx or vwsub(u).vv/vx. 8085 static SDValue combineADDSUB_VLToVWADDSUB_VL(SDNode *N, SelectionDAG &DAG, 8086 bool Commute = false) { 8087 assert((N->getOpcode() == RISCVISD::ADD_VL || 8088 N->getOpcode() == RISCVISD::SUB_VL) && 8089 "Unexpected opcode"); 8090 bool IsAdd = N->getOpcode() == RISCVISD::ADD_VL; 8091 SDValue Op0 = N->getOperand(0); 8092 SDValue Op1 = N->getOperand(1); 8093 if (Commute) 8094 std::swap(Op0, Op1); 8095 8096 MVT VT = N->getSimpleValueType(0); 8097 8098 // Determine the narrow size for a widening add/sub. 8099 unsigned NarrowSize = VT.getScalarSizeInBits() / 2; 8100 MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize), 8101 VT.getVectorElementCount()); 8102 8103 SDValue Mask = N->getOperand(2); 8104 SDValue VL = N->getOperand(3); 8105 8106 SDLoc DL(N); 8107 8108 // If the RHS is a sext or zext, we can form a widening op. 
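// e.g. (illustrative): (add_vl X, (vsext_vl Y)) where Y has half-width
// elements becomes (vwadd_w_vl X, Y), i.e. a single vwadd.wv instead of a
// separate widening extend followed by an add.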
8109 if ((Op1.getOpcode() == RISCVISD::VZEXT_VL || 8110 Op1.getOpcode() == RISCVISD::VSEXT_VL) && 8111 Op1.hasOneUse() && Op1.getOperand(1) == Mask && Op1.getOperand(2) == VL) { 8112 unsigned ExtOpc = Op1.getOpcode(); 8113 Op1 = Op1.getOperand(0); 8114 // Re-introduce narrower extends if needed. 8115 if (Op1.getValueType() != NarrowVT) 8116 Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL); 8117 8118 unsigned WOpc; 8119 if (ExtOpc == RISCVISD::VSEXT_VL) 8120 WOpc = IsAdd ? RISCVISD::VWADD_W_VL : RISCVISD::VWSUB_W_VL; 8121 else 8122 WOpc = IsAdd ? RISCVISD::VWADDU_W_VL : RISCVISD::VWSUBU_W_VL; 8123 8124 return DAG.getNode(WOpc, DL, VT, Op0, Op1, Mask, VL); 8125 } 8126 8127 // FIXME: Is it useful to form a vwadd.wx or vwsub.wx if it removes a scalar 8128 // sext/zext? 8129 8130 return SDValue(); 8131 } 8132 8133 // Try to convert vwadd(u).wv/wx or vwsub(u).wv/wx to vwadd(u).vv/vx or 8134 // vwsub(u).vv/vx. 8135 static SDValue combineVWADD_W_VL_VWSUB_W_VL(SDNode *N, SelectionDAG &DAG) { 8136 SDValue Op0 = N->getOperand(0); 8137 SDValue Op1 = N->getOperand(1); 8138 SDValue Mask = N->getOperand(2); 8139 SDValue VL = N->getOperand(3); 8140 8141 MVT VT = N->getSimpleValueType(0); 8142 MVT NarrowVT = Op1.getSimpleValueType(); 8143 unsigned NarrowSize = NarrowVT.getScalarSizeInBits(); 8144 8145 unsigned VOpc; 8146 switch (N->getOpcode()) { 8147 default: llvm_unreachable("Unexpected opcode"); 8148 case RISCVISD::VWADD_W_VL: VOpc = RISCVISD::VWADD_VL; break; 8149 case RISCVISD::VWSUB_W_VL: VOpc = RISCVISD::VWSUB_VL; break; 8150 case RISCVISD::VWADDU_W_VL: VOpc = RISCVISD::VWADDU_VL; break; 8151 case RISCVISD::VWSUBU_W_VL: VOpc = RISCVISD::VWSUBU_VL; break; 8152 } 8153 8154 bool IsSigned = N->getOpcode() == RISCVISD::VWADD_W_VL || 8155 N->getOpcode() == RISCVISD::VWSUB_W_VL; 8156 8157 SDLoc DL(N); 8158 8159 // If the LHS is a sext or zext, we can narrow this op to the same size as 8160 // the RHS. 8161 if (((Op0.getOpcode() == RISCVISD::VZEXT_VL && !IsSigned) || 8162 (Op0.getOpcode() == RISCVISD::VSEXT_VL && IsSigned)) && 8163 Op0.hasOneUse() && Op0.getOperand(1) == Mask && Op0.getOperand(2) == VL) { 8164 unsigned ExtOpc = Op0.getOpcode(); 8165 Op0 = Op0.getOperand(0); 8166 // Re-introduce narrower extends if needed. 8167 if (Op0.getValueType() != NarrowVT) 8168 Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL); 8169 return DAG.getNode(VOpc, DL, VT, Op0, Op1, Mask, VL); 8170 } 8171 8172 bool IsAdd = N->getOpcode() == RISCVISD::VWADD_W_VL || 8173 N->getOpcode() == RISCVISD::VWADDU_W_VL; 8174 8175 // Look for splats on the left hand side of a vwadd(u).wv. We might be able 8176 // to commute and use a vwadd(u).vx instead. 8177 if (IsAdd && Op0.getOpcode() == RISCVISD::VMV_V_X_VL && 8178 Op0.getOperand(0).isUndef() && Op0.getOperand(2) == VL) { 8179 Op0 = Op0.getOperand(1); 8180 8181 // See if we have enough sign bits or zero bits in the scalar to use a 8182 // widening add/sub by splatting to smaller element size. 8183 unsigned EltBits = VT.getScalarSizeInBits(); 8184 unsigned ScalarBits = Op0.getValueSizeInBits(); 8185 // Make sure we're getting all element bits from the scalar register. 8186 // FIXME: Support implicit sign extension of vmv.v.x? 8187 if (ScalarBits < EltBits) 8188 return SDValue(); 8189 8190 if (IsSigned) { 8191 if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize)) 8192 return SDValue(); 8193 } else { 8194 APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize); 8195 if (!DAG.MaskedValueIsZero(Op0, Mask)) 8196 return SDValue(); 8197 } 8198 8199 Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, 8200 DAG.getUNDEF(NarrowVT), Op0, VL); 8201 return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL); 8202 } 8203 8204 return SDValue(); 8205 } 8206 8207 // Try to form VWMUL, VWMULU or VWMULSU. 8208 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op. 8209 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG, 8210 bool Commute) { 8211 assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode"); 8212 SDValue Op0 = N->getOperand(0); 8213 SDValue Op1 = N->getOperand(1); 8214 if (Commute) 8215 std::swap(Op0, Op1); 8216 8217 bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL; 8218 bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL; 8219 bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL; 8220 if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse()) 8221 return SDValue(); 8222 8223 SDValue Mask = N->getOperand(2); 8224 SDValue VL = N->getOperand(3); 8225 8226 // Make sure the mask and VL match. 8227 if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL) 8228 return SDValue(); 8229 8230 MVT VT = N->getSimpleValueType(0); 8231 8232 // Determine the narrow size for a widening multiply. 8233 unsigned NarrowSize = VT.getScalarSizeInBits() / 2; 8234 MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize), 8235 VT.getVectorElementCount()); 8236 8237 SDLoc DL(N); 8238 8239 // See if the other operand is the same opcode. 8240 if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) { 8241 if (!Op1.hasOneUse()) 8242 return SDValue(); 8243 8244 // Make sure the mask and VL match. 8245 if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL) 8246 return SDValue(); 8247 8248 Op1 = Op1.getOperand(0); 8249 } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) { 8250 // The operand is a splat of a scalar. 8251 8252 // The passthru must be undef for tail agnostic. 8253 if (!Op1.getOperand(0).isUndef()) 8254 return SDValue(); 8255 // The VL must be the same. 8256 if (Op1.getOperand(2) != VL) 8257 return SDValue(); 8258 8259 // Get the scalar value. 8260 Op1 = Op1.getOperand(1); 8261 8262 // See if we have enough sign bits or zero bits in the scalar to use a 8263 // widening multiply by splatting to smaller element size. 8264 unsigned EltBits = VT.getScalarSizeInBits(); 8265 unsigned ScalarBits = Op1.getValueSizeInBits(); 8266 // Make sure we're getting all element bits from the scalar register. 8267 // FIXME: Support implicit sign extension of vmv.v.x? 8268 if (ScalarBits < EltBits) 8269 return SDValue(); 8270 8271 // If the LHS is a sign extend, try to use vwmul. 8272 if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) { 8273 // Can use vwmul. 8274 } else { 8275 // Otherwise try to use vwmulu or vwmulsu. 8276 APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize); 8277 if (DAG.MaskedValueIsZero(Op1, Mask)) 8278 IsVWMULSU = IsSignExt; 8279 else 8280 return SDValue(); 8281 } 8282 8283 Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, 8284 DAG.getUNDEF(NarrowVT), Op1, VL); 8285 } else 8286 return SDValue(); 8287 8288 Op0 = Op0.getOperand(0); 8289 8290 // Re-introduce narrower extends if needed. 8291 unsigned ExtOpc = IsSignExt ?
8187 if (ScalarBits < EltBits) 8188 return SDValue(); 8189 8190 if (IsSigned) { 8191 if (DAG.ComputeNumSignBits(Op0) <= (ScalarBits - NarrowSize)) 8192 return SDValue(); 8193 } else { 8194 APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize); 8195 if (!DAG.MaskedValueIsZero(Op0, Mask)) 8196 return SDValue(); 8197 } 8198 8199 Op0 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, 8200 DAG.getUNDEF(NarrowVT), Op0, VL); 8201 return DAG.getNode(VOpc, DL, VT, Op1, Op0, Mask, VL); 8202 } 8203 8204 return SDValue(); 8205 } 8206 8207 // Try to form VWMUL, VWMULU or VWMULSU. 8208 // TODO: Support VWMULSU.vx with a sign extend Op and a splat of scalar Op. 8209 static SDValue combineMUL_VLToVWMUL_VL(SDNode *N, SelectionDAG &DAG, 8210 bool Commute) { 8211 assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode"); 8212 SDValue Op0 = N->getOperand(0); 8213 SDValue Op1 = N->getOperand(1); 8214 if (Commute) 8215 std::swap(Op0, Op1); 8216 8217 bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL; 8218 bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL; 8219 bool IsVWMULSU = IsSignExt && Op1.getOpcode() == RISCVISD::VZEXT_VL; 8220 if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse()) 8221 return SDValue(); 8222 8223 SDValue Mask = N->getOperand(2); 8224 SDValue VL = N->getOperand(3); 8225 8226 // Make sure the mask and VL match. 8227 if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL) 8228 return SDValue(); 8229 8230 MVT VT = N->getSimpleValueType(0); 8231 8232 // Determine the narrow size for a widening multiply. 8233 unsigned NarrowSize = VT.getScalarSizeInBits() / 2; 8234 MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize), 8235 VT.getVectorElementCount()); 8236 8237 SDLoc DL(N); 8238 8239 // See if the other operand is the same opcode. 8240 if (IsVWMULSU || Op0.getOpcode() == Op1.getOpcode()) { 8241 if (!Op1.hasOneUse()) 8242 return SDValue(); 8243 8244 // Make sure the mask and VL match. 8245 if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL) 8246 return SDValue(); 8247 8248 Op1 = Op1.getOperand(0); 8249 } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) { 8250 // The operand is a splat of a scalar. 8251 8252 // The pasthru must be undef for tail agnostic 8253 if (!Op1.getOperand(0).isUndef()) 8254 return SDValue(); 8255 // The VL must be the same. 8256 if (Op1.getOperand(2) != VL) 8257 return SDValue(); 8258 8259 // Get the scalar value. 8260 Op1 = Op1.getOperand(1); 8261 8262 // See if have enough sign bits or zero bits in the scalar to use a 8263 // widening multiply by splatting to smaller element size. 8264 unsigned EltBits = VT.getScalarSizeInBits(); 8265 unsigned ScalarBits = Op1.getValueSizeInBits(); 8266 // Make sure we're getting all element bits from the scalar register. 8267 // FIXME: Support implicit sign extension of vmv.v.x? 8268 if (ScalarBits < EltBits) 8269 return SDValue(); 8270 8271 // If the LHS is a sign extend, try to use vwmul. 8272 if (IsSignExt && DAG.ComputeNumSignBits(Op1) > (ScalarBits - NarrowSize)) { 8273 // Can use vwmul. 8274 } else { 8275 // Otherwise try to use vwmulu or vwmulsu. 8276 APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize); 8277 if (DAG.MaskedValueIsZero(Op1, Mask)) 8278 IsVWMULSU = IsSignExt; 8279 else 8280 return SDValue(); 8281 } 8282 8283 Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, 8284 DAG.getUNDEF(NarrowVT), Op1, VL); 8285 } else 8286 return SDValue(); 8287 8288 Op0 = Op0.getOperand(0); 8289 8290 // Re-introduce narrower extends if needed. 8291 unsigned ExtOpc = IsSignExt ? 
RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL; 8292 if (Op0.getValueType() != NarrowVT) 8293 Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL); 8294 // vwmulsu requires second operand to be zero extended. 8295 ExtOpc = IsVWMULSU ? RISCVISD::VZEXT_VL : ExtOpc; 8296 if (Op1.getValueType() != NarrowVT) 8297 Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL); 8298 8299 unsigned WMulOpc = RISCVISD::VWMULSU_VL; 8300 if (!IsVWMULSU) 8301 WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL; 8302 return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL); 8303 } 8304 8305 static RISCVFPRndMode::RoundingMode matchRoundingOp(SDValue Op) { 8306 switch (Op.getOpcode()) { 8307 case ISD::FROUNDEVEN: return RISCVFPRndMode::RNE; 8308 case ISD::FTRUNC: return RISCVFPRndMode::RTZ; 8309 case ISD::FFLOOR: return RISCVFPRndMode::RDN; 8310 case ISD::FCEIL: return RISCVFPRndMode::RUP; 8311 case ISD::FROUND: return RISCVFPRndMode::RMM; 8312 } 8313 8314 return RISCVFPRndMode::Invalid; 8315 } 8316 8317 // Fold 8318 // (fp_to_int (froundeven X)) -> fcvt X, rne 8319 // (fp_to_int (ftrunc X)) -> fcvt X, rtz 8320 // (fp_to_int (ffloor X)) -> fcvt X, rdn 8321 // (fp_to_int (fceil X)) -> fcvt X, rup 8322 // (fp_to_int (fround X)) -> fcvt X, rmm 8323 static SDValue performFP_TO_INTCombine(SDNode *N, 8324 TargetLowering::DAGCombinerInfo &DCI, 8325 const RISCVSubtarget &Subtarget) { 8326 SelectionDAG &DAG = DCI.DAG; 8327 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8328 MVT XLenVT = Subtarget.getXLenVT(); 8329 8330 // Only handle XLen or i32 types. Other types narrower than XLen will 8331 // eventually be legalized to XLenVT. 8332 EVT VT = N->getValueType(0); 8333 if (VT != MVT::i32 && VT != XLenVT) 8334 return SDValue(); 8335 8336 SDValue Src = N->getOperand(0); 8337 8338 // Ensure the FP type is also legal. 8339 if (!TLI.isTypeLegal(Src.getValueType())) 8340 return SDValue(); 8341 8342 // Don't do this for f16 with Zfhmin and not Zfh. 8343 if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh()) 8344 return SDValue(); 8345 8346 RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src); 8347 if (FRM == RISCVFPRndMode::Invalid) 8348 return SDValue(); 8349 8350 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT; 8351 8352 unsigned Opc; 8353 if (VT == XLenVT) 8354 Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU; 8355 else 8356 Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64; 8357 8358 SDLoc DL(N); 8359 SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src.getOperand(0), 8360 DAG.getTargetConstant(FRM, DL, XLenVT)); 8361 return DAG.getNode(ISD::TRUNCATE, DL, VT, FpToInt); 8362 } 8363 8364 // Fold 8365 // (fp_to_int_sat (froundeven X)) -> (select X == nan, 0, (fcvt X, rne)) 8366 // (fp_to_int_sat (ftrunc X)) -> (select X == nan, 0, (fcvt X, rtz)) 8367 // (fp_to_int_sat (ffloor X)) -> (select X == nan, 0, (fcvt X, rdn)) 8368 // (fp_to_int_sat (fceil X)) -> (select X == nan, 0, (fcvt X, rup)) 8369 // (fp_to_int_sat (fround X)) -> (select X == nan, 0, (fcvt X, rmm)) 8370 static SDValue performFP_TO_INT_SATCombine(SDNode *N, 8371 TargetLowering::DAGCombinerInfo &DCI, 8372 const RISCVSubtarget &Subtarget) { 8373 SelectionDAG &DAG = DCI.DAG; 8374 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 8375 MVT XLenVT = Subtarget.getXLenVT(); 8376 8377 // Only handle XLen types. Other types narrower than XLen will eventually be 8378 // legalized to XLenVT. 
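// e.g. (illustrative, RV64 with D): (fp_to_sint_sat (ffloor X)) for f64->i64
// is expected to select to roughly fcvt.l.d with the rdn rounding mode, plus
// a compare/select that forces the result to 0 when X is NaN.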
8379 EVT DstVT = N->getValueType(0); 8380 if (DstVT != XLenVT) 8381 return SDValue(); 8382 8383 SDValue Src = N->getOperand(0); 8384 8385 // Ensure the FP type is also legal. 8386 if (!TLI.isTypeLegal(Src.getValueType())) 8387 return SDValue(); 8388 8389 // Don't do this for f16 with Zfhmin and not Zfh. 8390 if (Src.getValueType() == MVT::f16 && !Subtarget.hasStdExtZfh()) 8391 return SDValue(); 8392 8393 EVT SatVT = cast<VTSDNode>(N->getOperand(1))->getVT(); 8394 8395 RISCVFPRndMode::RoundingMode FRM = matchRoundingOp(Src); 8396 if (FRM == RISCVFPRndMode::Invalid) 8397 return SDValue(); 8398 8399 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT_SAT; 8400 8401 unsigned Opc; 8402 if (SatVT == DstVT) 8403 Opc = IsSigned ? RISCVISD::FCVT_X : RISCVISD::FCVT_XU; 8404 else if (DstVT == MVT::i64 && SatVT == MVT::i32) 8405 Opc = IsSigned ? RISCVISD::FCVT_W_RV64 : RISCVISD::FCVT_WU_RV64; 8406 else 8407 return SDValue(); 8408 // FIXME: Support other SatVTs by clamping before or after the conversion. 8409 8410 Src = Src.getOperand(0); 8411 8412 SDLoc DL(N); 8413 SDValue FpToInt = DAG.getNode(Opc, DL, XLenVT, Src, 8414 DAG.getTargetConstant(FRM, DL, XLenVT)); 8415 8416 // RISCV FP-to-int conversions saturate to the destination register size, but 8417 // don't produce 0 for nan. 8418 SDValue ZeroInt = DAG.getConstant(0, DL, DstVT); 8419 return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO); 8420 } 8421 8422 // Combine (bitreverse (bswap X)) to the BREV8 GREVI encoding if the type is 8423 // smaller than XLenVT. 8424 static SDValue performBITREVERSECombine(SDNode *N, SelectionDAG &DAG, 8425 const RISCVSubtarget &Subtarget) { 8426 assert(Subtarget.hasStdExtZbkb() && "Unexpected extension"); 8427 8428 SDValue Src = N->getOperand(0); 8429 if (Src.getOpcode() != ISD::BSWAP) 8430 return SDValue(); 8431 8432 EVT VT = N->getValueType(0); 8433 if (!VT.isScalarInteger() || VT.getSizeInBits() >= Subtarget.getXLen() || 8434 !isPowerOf2_32(VT.getSizeInBits())) 8435 return SDValue(); 8436 8437 SDLoc DL(N); 8438 return DAG.getNode(RISCVISD::GREV, DL, VT, Src.getOperand(0), 8439 DAG.getConstant(7, DL, VT)); 8440 } 8441 8442 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N, 8443 DAGCombinerInfo &DCI) const { 8444 SelectionDAG &DAG = DCI.DAG; 8445 8446 // Helper to call SimplifyDemandedBits on an operand of N where only some low 8447 // bits are demanded. N will be added to the Worklist if it was not deleted. 8448 // Caller should return SDValue(N, 0) if this returns true. 8449 auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) { 8450 SDValue Op = N->getOperand(OpNo); 8451 APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits); 8452 if (!SimplifyDemandedBits(Op, Mask, DCI)) 8453 return false; 8454 8455 if (N->getOpcode() != ISD::DELETED_NODE) 8456 DCI.AddToWorklist(N); 8457 return true; 8458 }; 8459 8460 switch (N->getOpcode()) { 8461 default: 8462 break; 8463 case RISCVISD::SplitF64: { 8464 SDValue Op0 = N->getOperand(0); 8465 // If the input to SplitF64 is just BuildPairF64 then the operation is 8466 // redundant. Instead, use BuildPairF64's operands directly. 
8467 if (Op0->getOpcode() == RISCVISD::BuildPairF64) 8468 return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1)); 8469 8470 if (Op0->isUndef()) { 8471 SDValue Lo = DAG.getUNDEF(MVT::i32); 8472 SDValue Hi = DAG.getUNDEF(MVT::i32); 8473 return DCI.CombineTo(N, Lo, Hi); 8474 } 8475 8476 SDLoc DL(N); 8477 8478 // It's cheaper to materialise two 32-bit integers than to load a double 8479 // from the constant pool and transfer it to integer registers through the 8480 // stack. 8481 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) { 8482 APInt V = C->getValueAPF().bitcastToAPInt(); 8483 SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32); 8484 SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32); 8485 return DCI.CombineTo(N, Lo, Hi); 8486 } 8487 8488 // This is a target-specific version of a DAGCombine performed in 8489 // DAGCombiner::visitBITCAST. It performs the equivalent of: 8490 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 8491 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 8492 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 8493 !Op0.getNode()->hasOneUse()) 8494 break; 8495 SDValue NewSplitF64 = 8496 DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), 8497 Op0.getOperand(0)); 8498 SDValue Lo = NewSplitF64.getValue(0); 8499 SDValue Hi = NewSplitF64.getValue(1); 8500 APInt SignBit = APInt::getSignMask(32); 8501 if (Op0.getOpcode() == ISD::FNEG) { 8502 SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi, 8503 DAG.getConstant(SignBit, DL, MVT::i32)); 8504 return DCI.CombineTo(N, Lo, NewHi); 8505 } 8506 assert(Op0.getOpcode() == ISD::FABS); 8507 SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi, 8508 DAG.getConstant(~SignBit, DL, MVT::i32)); 8509 return DCI.CombineTo(N, Lo, NewHi); 8510 } 8511 case RISCVISD::SLLW: 8512 case RISCVISD::SRAW: 8513 case RISCVISD::SRLW: { 8514 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read. 8515 if (SimplifyDemandedLowBitsHelper(0, 32) || 8516 SimplifyDemandedLowBitsHelper(1, 5)) 8517 return SDValue(N, 0); 8518 8519 break; 8520 } 8521 case ISD::ROTR: 8522 case ISD::ROTL: 8523 case RISCVISD::RORW: 8524 case RISCVISD::ROLW: { 8525 if (N->getOpcode() == RISCVISD::RORW || N->getOpcode() == RISCVISD::ROLW) { 8526 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read. 8527 if (SimplifyDemandedLowBitsHelper(0, 32) || 8528 SimplifyDemandedLowBitsHelper(1, 5)) 8529 return SDValue(N, 0); 8530 } 8531 8532 return combineROTR_ROTL_RORW_ROLW(N, DAG, Subtarget); 8533 } 8534 case RISCVISD::CLZW: 8535 case RISCVISD::CTZW: { 8536 // Only the lower 32 bits of the first operand are read. 8537 if (SimplifyDemandedLowBitsHelper(0, 32)) 8538 return SDValue(N, 0); 8539 break; 8540 } 8541 case RISCVISD::GREV: 8542 case RISCVISD::GORC: { 8543 // Only the lower log2(Bitwidth) bits of the shift amount are read. 8544 unsigned BitWidth = N->getOperand(1).getValueSizeInBits(); 8545 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width"); 8546 if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth))) 8547 return SDValue(N, 0); 8548 8549 return combineGREVI_GORCI(N, DAG); 8550 } 8551 case RISCVISD::GREVW: 8552 case RISCVISD::GORCW: { 8553 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
8554 if (SimplifyDemandedLowBitsHelper(0, 32) || 8555 SimplifyDemandedLowBitsHelper(1, 5)) 8556 return SDValue(N, 0); 8557 8558 break; 8559 } 8560 case RISCVISD::SHFL: 8561 case RISCVISD::UNSHFL: { 8562 // Only the lower log2(Bitwidth)-1 bits of the shift amount are read. 8563 unsigned BitWidth = N->getOperand(1).getValueSizeInBits(); 8564 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width"); 8565 if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1)) 8566 return SDValue(N, 0); 8567 8568 break; 8569 } 8570 case RISCVISD::SHFLW: 8571 case RISCVISD::UNSHFLW: { 8572 // Only the lower 32 bits of LHS and lower 4 bits of RHS are read. 8573 if (SimplifyDemandedLowBitsHelper(0, 32) || 8574 SimplifyDemandedLowBitsHelper(1, 4)) 8575 return SDValue(N, 0); 8576 8577 break; 8578 } 8579 case RISCVISD::BCOMPRESSW: 8580 case RISCVISD::BDECOMPRESSW: { 8581 // Only the lower 32 bits of LHS and RHS are read. 8582 if (SimplifyDemandedLowBitsHelper(0, 32) || 8583 SimplifyDemandedLowBitsHelper(1, 32)) 8584 return SDValue(N, 0); 8585 8586 break; 8587 } 8588 case RISCVISD::FSR: 8589 case RISCVISD::FSL: 8590 case RISCVISD::FSRW: 8591 case RISCVISD::FSLW: { 8592 bool IsWInstruction = 8593 N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW; 8594 unsigned BitWidth = 8595 IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits(); 8596 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width"); 8597 // Only the lower log2(Bitwidth)+1 bits of the shift amount are read. 8598 if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1)) 8599 return SDValue(N, 0); 8600 8601 break; 8602 } 8603 case RISCVISD::FMV_X_ANYEXTH: 8604 case RISCVISD::FMV_X_ANYEXTW_RV64: { 8605 SDLoc DL(N); 8606 SDValue Op0 = N->getOperand(0); 8607 MVT VT = N->getSimpleValueType(0); 8608 // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the 8609 // conversion is unnecessary and can be replaced with the FMV_W_X_RV64 8610 // operand. Similar for FMV_X_ANYEXTH and FMV_H_X. 8611 if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 && 8612 Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) || 8613 (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH && 8614 Op0->getOpcode() == RISCVISD::FMV_H_X)) { 8615 assert(Op0.getOperand(0).getValueType() == VT && 8616 "Unexpected value type!"); 8617 return Op0.getOperand(0); 8618 } 8619 8620 // This is a target-specific version of a DAGCombine performed in 8621 // DAGCombiner::visitBITCAST. It performs the equivalent of: 8622 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 8623 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 8624 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) || 8625 !Op0.getNode()->hasOneUse()) 8626 break; 8627 SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0)); 8628 unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ?
32 : 16; 8629 APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits()); 8630 if (Op0.getOpcode() == ISD::FNEG) 8631 return DAG.getNode(ISD::XOR, DL, VT, NewFMV, 8632 DAG.getConstant(SignBit, DL, VT)); 8633 8634 assert(Op0.getOpcode() == ISD::FABS); 8635 return DAG.getNode(ISD::AND, DL, VT, NewFMV, 8636 DAG.getConstant(~SignBit, DL, VT)); 8637 } 8638 case ISD::ADD: 8639 return performADDCombine(N, DAG, Subtarget); 8640 case ISD::SUB: 8641 return performSUBCombine(N, DAG); 8642 case ISD::AND: 8643 return performANDCombine(N, DAG, Subtarget); 8644 case ISD::OR: 8645 return performORCombine(N, DAG, Subtarget); 8646 case ISD::XOR: 8647 return performXORCombine(N, DAG); 8648 case ISD::FADD: 8649 case ISD::UMAX: 8650 case ISD::UMIN: 8651 case ISD::SMAX: 8652 case ISD::SMIN: 8653 case ISD::FMAXNUM: 8654 case ISD::FMINNUM: 8655 return combineBinOpToReduce(N, DAG); 8656 case ISD::SIGN_EXTEND_INREG: 8657 return performSIGN_EXTEND_INREGCombine(N, DAG, Subtarget); 8658 case ISD::ZERO_EXTEND: 8659 // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during 8660 // type legalization. This is safe because fp_to_uint produces poison if 8661 // it overflows. 8662 if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit()) { 8663 SDValue Src = N->getOperand(0); 8664 if (Src.getOpcode() == ISD::FP_TO_UINT && 8665 isTypeLegal(Src.getOperand(0).getValueType())) 8666 return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64, 8667 Src.getOperand(0)); 8668 if (Src.getOpcode() == ISD::STRICT_FP_TO_UINT && Src.hasOneUse() && 8669 isTypeLegal(Src.getOperand(1).getValueType())) { 8670 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Other); 8671 SDValue Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, SDLoc(N), VTs, 8672 Src.getOperand(0), Src.getOperand(1)); 8673 DCI.CombineTo(N, Res); 8674 DAG.ReplaceAllUsesOfValueWith(Src.getValue(1), Res.getValue(1)); 8675 DCI.recursivelyDeleteUnusedNodes(Src.getNode()); 8676 return SDValue(N, 0); // Return N so it doesn't get rechecked. 8677 } 8678 } 8679 return SDValue(); 8680 case RISCVISD::SELECT_CC: { 8681 // Transform 8682 SDValue LHS = N->getOperand(0); 8683 SDValue RHS = N->getOperand(1); 8684 SDValue TrueV = N->getOperand(3); 8685 SDValue FalseV = N->getOperand(4); 8686 8687 // If the True and False values are the same, we don't need a select_cc. 8688 if (TrueV == FalseV) 8689 return TrueV; 8690 8691 ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get(); 8692 if (!ISD::isIntEqualitySetCC(CCVal)) 8693 break; 8694 8695 // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) -> 8696 // (select_cc X, Y, lt, trueV, falseV) 8697 // Sometimes the setcc is introduced after select_cc has been formed. 8698 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) && 8699 LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) { 8700 // If we're looking for eq 0 instead of ne 0, we need to invert the 8701 // condition. 
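// e.g. (illustrative): (select_cc (setcc X, Y, lt), 0, eq, t, f) is an eq-0
// test, so lt is inverted to ge and the node becomes
// (select_cc X, Y, ge, t, f).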
8702 bool Invert = CCVal == ISD::SETEQ; 8703 CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 8704 if (Invert) 8705 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 8706 8707 SDLoc DL(N); 8708 RHS = LHS.getOperand(1); 8709 LHS = LHS.getOperand(0); 8710 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 8711 8712 SDValue TargetCC = DAG.getCondCode(CCVal); 8713 return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0), 8714 {LHS, RHS, TargetCC, TrueV, FalseV}); 8715 } 8716 8717 // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) -> 8718 // (select_cc X, Y, eq/ne, trueV, falseV) 8719 if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS)) 8720 return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0), 8721 {LHS.getOperand(0), LHS.getOperand(1), 8722 N->getOperand(2), TrueV, FalseV}); 8723 // (select_cc X, 1, setne, trueV, falseV) -> 8724 // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1. 8725 // This can occur when legalizing some floating point comparisons. 8726 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1); 8727 if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) { 8728 SDLoc DL(N); 8729 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 8730 SDValue TargetCC = DAG.getCondCode(CCVal); 8731 RHS = DAG.getConstant(0, DL, LHS.getValueType()); 8732 return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0), 8733 {LHS, RHS, TargetCC, TrueV, FalseV}); 8734 } 8735 8736 break; 8737 } 8738 case RISCVISD::BR_CC: { 8739 SDValue LHS = N->getOperand(1); 8740 SDValue RHS = N->getOperand(2); 8741 ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get(); 8742 if (!ISD::isIntEqualitySetCC(CCVal)) 8743 break; 8744 8745 // Fold (br_cc (setlt X, Y), 0, ne, dest) -> 8746 // (br_cc X, Y, lt, dest) 8747 // Sometimes the setcc is introduced after br_cc has been formed. 8748 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) && 8749 LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) { 8750 // If we're looking for eq 0 instead of ne 0, we need to invert the 8751 // condition. 8752 bool Invert = CCVal == ISD::SETEQ; 8753 CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get(); 8754 if (Invert) 8755 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType()); 8756 8757 SDLoc DL(N); 8758 RHS = LHS.getOperand(1); 8759 LHS = LHS.getOperand(0); 8760 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG); 8761 8762 return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0), 8763 N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal), 8764 N->getOperand(4)); 8765 } 8766 8767 // Fold (br_cc (xor X, Y), 0, eq/ne, dest) -> 8768 // (br_cc X, Y, eq/ne, dest) 8769 if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS)) 8770 return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0), 8771 N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1), 8772 N->getOperand(3), N->getOperand(4)); 8773 8774 // (br_cc X, 1, setne, dest) -> 8775 // (br_cc X, 0, seteq, dest) if we can prove X is 0/1. 8776 // This can occur when legalizing some floating point comparisons.
    APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
    if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
      SDLoc DL(N);
      CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
      SDValue TargetCC = DAG.getCondCode(CCVal);
      RHS = DAG.getConstant(0, DL, LHS.getValueType());
      return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
                         N->getOperand(0), LHS, RHS, TargetCC,
                         N->getOperand(4));
    }
    break;
  }
  case ISD::BITREVERSE:
    return performBITREVERSECombine(N, DAG, Subtarget);
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
    return performFP_TO_INTCombine(N, DCI, Subtarget);
  case ISD::FP_TO_SINT_SAT:
  case ISD::FP_TO_UINT_SAT:
    return performFP_TO_INT_SATCombine(N, DCI, Subtarget);
  case ISD::FCOPYSIGN: {
    EVT VT = N->getValueType(0);
    if (!VT.isVector())
      break;
    // There is a form of VFSGNJ which injects the negated sign of its second
    // operand. Try to bubble any FNEG up after the extend/round to produce
    // this optimized pattern. Avoid modifying cases where the FP_ROUND has
    // TRUNC=1.
    SDValue In2 = N->getOperand(1);
    // Avoid cases where the extend/round has multiple uses, as duplicating
    // those is typically more expensive than removing a fneg.
    if (!In2.hasOneUse())
      break;
    if (In2.getOpcode() != ISD::FP_EXTEND &&
        (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
      break;
    In2 = In2.getOperand(0);
    if (In2.getOpcode() != ISD::FNEG)
      break;
    SDLoc DL(N);
    SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
    return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
                       DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
  }
  case ISD::MGATHER:
  case ISD::MSCATTER:
  case ISD::VP_GATHER:
  case ISD::VP_SCATTER: {
    if (!DCI.isBeforeLegalize())
      break;
    SDValue Index, ScaleOp;
    bool IsIndexScaled = false;
    bool IsIndexSigned = false;
    if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
      Index = VPGSN->getIndex();
      ScaleOp = VPGSN->getScale();
      IsIndexScaled = VPGSN->isIndexScaled();
      IsIndexSigned = VPGSN->isIndexSigned();
    } else {
      const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
      Index = MGSN->getIndex();
      ScaleOp = MGSN->getScale();
      IsIndexScaled = MGSN->isIndexScaled();
      IsIndexSigned = MGSN->isIndexSigned();
    }
    EVT IndexVT = Index.getValueType();
    MVT XLenVT = Subtarget.getXLenVT();
    // RISCV indexed loads only support the "unsigned unscaled" addressing
    // mode, so anything else must be manually legalized.
    bool NeedsIdxLegalization =
        IsIndexScaled ||
        (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
    if (!NeedsIdxLegalization)
      break;

    SDLoc DL(N);

    // Any index legalization should first promote to XLenVT, so we don't lose
    // bits when scaling. This may create an illegal index type so we let
    // LLVM's legalization take care of the splitting.
    // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
    if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
      IndexVT = IndexVT.changeVectorElementType(XLenVT);
      Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
                          DL, IndexVT, Index);
    }

    unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
    if (IsIndexScaled && Scale != 1) {
      // Manually scale the indices by the element size.
      // TODO: Sanitize the scale operand here?
      // TODO: For VP nodes, should we use VP_SHL here?
      assert(isPowerOf2_32(Scale) && "Expecting a power-of-two scale");
      SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
      Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
    }

    ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
    if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
      return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
                             {VPGN->getChain(), VPGN->getBasePtr(), Index,
                              VPGN->getScale(), VPGN->getMask(),
                              VPGN->getVectorLength()},
                             VPGN->getMemOperand(), NewIndexTy);
    if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
      return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
                              {VPSN->getChain(), VPSN->getValue(),
                               VPSN->getBasePtr(), Index, VPSN->getScale(),
                               VPSN->getMask(), VPSN->getVectorLength()},
                              VPSN->getMemOperand(), NewIndexTy);
    if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
      return DAG.getMaskedGather(
          N->getVTList(), MGN->getMemoryVT(), DL,
          {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
           MGN->getBasePtr(), Index, MGN->getScale()},
          MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
    const auto *MSN = cast<MaskedScatterSDNode>(N);
    return DAG.getMaskedScatter(
        N->getVTList(), MSN->getMemoryVT(), DL,
        {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
         Index, MSN->getScale()},
        MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
  }
  case RISCVISD::SRA_VL:
  case RISCVISD::SRL_VL:
  case RISCVISD::SHL_VL: {
    SDValue ShAmt = N->getOperand(1);
    if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
      // We don't need the upper 32 bits of a 64-bit element for a shift amount.
      SDLoc DL(N);
      SDValue VL = N->getOperand(3);
      EVT VT = N->getValueType(0);
      ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT),
                          ShAmt.getOperand(1), VL);
      return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
                         N->getOperand(2), N->getOperand(3));
    }
    break;
  }
  case ISD::SRA:
  case ISD::SRL:
  case ISD::SHL: {
    SDValue ShAmt = N->getOperand(1);
    if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
      // We don't need the upper 32 bits of a 64-bit element for a shift amount.
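      // Vector shifts only use the low log2(SEW) bits of the shift amount, so
      // splatting just the low word of the split 64-bit scalar is enough.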
8922 SDLoc DL(N); 8923 EVT VT = N->getValueType(0); 8924 ShAmt = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, DAG.getUNDEF(VT), 8925 ShAmt.getOperand(1), 8926 DAG.getRegister(RISCV::X0, Subtarget.getXLenVT())); 8927 return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt); 8928 } 8929 break; 8930 } 8931 case RISCVISD::ADD_VL: 8932 if (SDValue V = combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ false)) 8933 return V; 8934 return combineADDSUB_VLToVWADDSUB_VL(N, DAG, /*Commute*/ true); 8935 case RISCVISD::SUB_VL: 8936 return combineADDSUB_VLToVWADDSUB_VL(N, DAG); 8937 case RISCVISD::VWADD_W_VL: 8938 case RISCVISD::VWADDU_W_VL: 8939 case RISCVISD::VWSUB_W_VL: 8940 case RISCVISD::VWSUBU_W_VL: 8941 return combineVWADD_W_VL_VWSUB_W_VL(N, DAG); 8942 case RISCVISD::MUL_VL: 8943 if (SDValue V = combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ false)) 8944 return V; 8945 // Mul is commutative. 8946 return combineMUL_VLToVWMUL_VL(N, DAG, /*Commute*/ true); 8947 case ISD::STORE: { 8948 auto *Store = cast<StoreSDNode>(N); 8949 SDValue Val = Store->getValue(); 8950 // Combine store of vmv.x.s to vse with VL of 1. 8951 // FIXME: Support FP. 8952 if (Val.getOpcode() == RISCVISD::VMV_X_S) { 8953 SDValue Src = Val.getOperand(0); 8954 EVT VecVT = Src.getValueType(); 8955 EVT MemVT = Store->getMemoryVT(); 8956 // The memory VT and the element type must match. 8957 if (VecVT.getVectorElementType() == MemVT) { 8958 SDLoc DL(N); 8959 MVT MaskVT = getMaskTypeFor(VecVT); 8960 return DAG.getStoreVP( 8961 Store->getChain(), DL, Src, Store->getBasePtr(), Store->getOffset(), 8962 DAG.getConstant(1, DL, MaskVT), 8963 DAG.getConstant(1, DL, Subtarget.getXLenVT()), MemVT, 8964 Store->getMemOperand(), Store->getAddressingMode(), 8965 Store->isTruncatingStore(), /*IsCompress*/ false); 8966 } 8967 } 8968 8969 break; 8970 } 8971 case ISD::SPLAT_VECTOR: { 8972 EVT VT = N->getValueType(0); 8973 // Only perform this combine on legal MVT types. 8974 if (!isTypeLegal(VT)) 8975 break; 8976 if (auto Gather = matchSplatAsGather(N->getOperand(0), VT.getSimpleVT(), N, 8977 DAG, Subtarget)) 8978 return Gather; 8979 break; 8980 } 8981 case RISCVISD::VMV_V_X_VL: { 8982 // Tail agnostic VMV.V.X only demands the vector element bitwidth from the 8983 // scalar input. 8984 unsigned ScalarSize = N->getOperand(1).getValueSizeInBits(); 8985 unsigned EltWidth = N->getValueType(0).getScalarSizeInBits(); 8986 if (ScalarSize > EltWidth && N->getOperand(0).isUndef()) 8987 if (SimplifyDemandedLowBitsHelper(1, EltWidth)) 8988 return SDValue(N, 0); 8989 8990 break; 8991 } 8992 case ISD::INTRINSIC_WO_CHAIN: { 8993 unsigned IntNo = N->getConstantOperandVal(0); 8994 switch (IntNo) { 8995 // By default we do not combine any intrinsic. 8996 default: 8997 return SDValue(); 8998 case Intrinsic::riscv_vcpop: 8999 case Intrinsic::riscv_vcpop_mask: 9000 case Intrinsic::riscv_vfirst: 9001 case Intrinsic::riscv_vfirst_mask: { 9002 SDValue VL = N->getOperand(2); 9003 if (IntNo == Intrinsic::riscv_vcpop_mask || 9004 IntNo == Intrinsic::riscv_vfirst_mask) 9005 VL = N->getOperand(3); 9006 if (!isNullConstant(VL)) 9007 return SDValue(); 9008 // If VL is 0, vcpop -> li 0, vfirst -> li -1. 
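      // The V extension defines vfirst to return -1 when no mask bit is set;
      // with VL=0 no elements are examined, so both results are constants.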
9009 SDLoc DL(N); 9010 EVT VT = N->getValueType(0); 9011 if (IntNo == Intrinsic::riscv_vfirst || 9012 IntNo == Intrinsic::riscv_vfirst_mask) 9013 return DAG.getConstant(-1, DL, VT); 9014 return DAG.getConstant(0, DL, VT); 9015 } 9016 } 9017 } 9018 } 9019 9020 return SDValue(); 9021 } 9022 9023 bool RISCVTargetLowering::isDesirableToCommuteWithShift( 9024 const SDNode *N, CombineLevel Level) const { 9025 // The following folds are only desirable if `(OP _, c1 << c2)` can be 9026 // materialised in fewer instructions than `(OP _, c1)`: 9027 // 9028 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) 9029 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2) 9030 SDValue N0 = N->getOperand(0); 9031 EVT Ty = N0.getValueType(); 9032 if (Ty.isScalarInteger() && 9033 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) { 9034 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 9035 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1)); 9036 if (C1 && C2) { 9037 const APInt &C1Int = C1->getAPIntValue(); 9038 APInt ShiftedC1Int = C1Int << C2->getAPIntValue(); 9039 9040 // We can materialise `c1 << c2` into an add immediate, so it's "free", 9041 // and the combine should happen, to potentially allow further combines 9042 // later. 9043 if (ShiftedC1Int.getMinSignedBits() <= 64 && 9044 isLegalAddImmediate(ShiftedC1Int.getSExtValue())) 9045 return true; 9046 9047 // We can materialise `c1` in an add immediate, so it's "free", and the 9048 // combine should be prevented. 9049 if (C1Int.getMinSignedBits() <= 64 && 9050 isLegalAddImmediate(C1Int.getSExtValue())) 9051 return false; 9052 9053 // Neither constant will fit into an immediate, so find materialisation 9054 // costs. 9055 int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(), 9056 Subtarget.getFeatureBits(), 9057 /*CompressionCost*/true); 9058 int ShiftedC1Cost = RISCVMatInt::getIntMatCost( 9059 ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(), 9060 /*CompressionCost*/true); 9061 9062 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the 9063 // combine should be prevented. 9064 if (C1Cost < ShiftedC1Cost) 9065 return false; 9066 } 9067 } 9068 return true; 9069 } 9070 9071 bool RISCVTargetLowering::targetShrinkDemandedConstant( 9072 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 9073 TargetLoweringOpt &TLO) const { 9074 // Delay this optimization as late as possible. 9075 if (!TLO.LegalOps) 9076 return false; 9077 9078 EVT VT = Op.getValueType(); 9079 if (VT.isVector()) 9080 return false; 9081 9082 // Only handle AND for now. 9083 if (Op.getOpcode() != ISD::AND) 9084 return false; 9085 9086 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 9087 if (!C) 9088 return false; 9089 9090 const APInt &Mask = C->getAPIntValue(); 9091 9092 // Clear all non-demanded bits initially. 9093 APInt ShrunkMask = Mask & DemandedBits; 9094 9095 // Try to make a smaller immediate by setting undemanded bits. 
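  // Any mask that contains ShrunkMask and is contained in ExpandedMask acts
  // identically on the demanded bits, so we are free to pick the cheapest one.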
  APInt ExpandedMask = Mask | ~DemandedBits;

  auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
    return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
  };
  auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
    if (NewMask == Mask)
      return true;
    SDLoc DL(Op);
    SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
    SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
    return TLO.CombineTo(Op, NewOp);
  };

  // If the shrunk mask fits in sign extended 12 bits, let the target
  // independent code apply it.
  if (ShrunkMask.isSignedIntN(12))
    return false;

  // Preserve (and X, 0xffff) when zext.h is supported.
  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
    APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
    if (IsLegalMask(NewMask))
      return UseMask(NewMask);
  }

  // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
  if (VT == MVT::i64) {
    APInt NewMask = APInt(64, 0xffffffff);
    if (IsLegalMask(NewMask))
      return UseMask(NewMask);
  }

  // For the remaining optimizations, we need to be able to make a negative
  // number through a combination of mask and undemanded bits.
  if (!ExpandedMask.isNegative())
    return false;

  // Compute the minimum number of bits needed to represent the negative
  // number.
  unsigned MinSignedBits = ExpandedMask.getMinSignedBits();

  // Try to make a 12-bit negative immediate. If that fails try to make a
  // 32-bit negative immediate unless the shrunk immediate already fits in 32
  // bits.
  APInt NewMask = ShrunkMask;
  if (MinSignedBits <= 12)
    NewMask.setBitsFrom(11);
  else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
    NewMask.setBitsFrom(31);
  else
    return false;

  // Check that our new mask is a subset of the demanded mask.
  assert(IsLegalMask(NewMask));
  return UseMask(NewMask);
}

static uint64_t computeGREVOrGORC(uint64_t x, unsigned ShAmt, bool IsGORC) {
  static const uint64_t GREVMasks[] = {
      0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
      0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};

  for (unsigned Stage = 0; Stage != 6; ++Stage) {
    unsigned Shift = 1 << Stage;
    if (ShAmt & Shift) {
      uint64_t Mask = GREVMasks[Stage];
      uint64_t Res = ((x & Mask) << Shift) | ((x >> Shift) & Mask);
      if (IsGORC)
        Res |= x;
      x = Res;
    }
  }

  return x;
}

void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                        KnownBits &Known,
                                                        const APInt &DemandedElts,
                                                        const SelectionDAG &DAG,
                                                        unsigned Depth) const {
  unsigned BitWidth = Known.getBitWidth();
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  Known.resetAll();
  switch (Opc) {
  default: break;
  case RISCVISD::SELECT_CC: {
    Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
    // If we don't know any bits, early out.
9192 if (Known.isUnknown()) 9193 break; 9194 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1); 9195 9196 // Only known if known in both the LHS and RHS. 9197 Known = KnownBits::commonBits(Known, Known2); 9198 break; 9199 } 9200 case RISCVISD::REMUW: { 9201 KnownBits Known2; 9202 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 9203 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 9204 // We only care about the lower 32 bits. 9205 Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32)); 9206 // Restore the original width by sign extending. 9207 Known = Known.sext(BitWidth); 9208 break; 9209 } 9210 case RISCVISD::DIVUW: { 9211 KnownBits Known2; 9212 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1); 9213 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1); 9214 // We only care about the lower 32 bits. 9215 Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32)); 9216 // Restore the original width by sign extending. 9217 Known = Known.sext(BitWidth); 9218 break; 9219 } 9220 case RISCVISD::CTZW: { 9221 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); 9222 unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros(); 9223 unsigned LowBits = Log2_32(PossibleTZ) + 1; 9224 Known.Zero.setBitsFrom(LowBits); 9225 break; 9226 } 9227 case RISCVISD::CLZW: { 9228 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); 9229 unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros(); 9230 unsigned LowBits = Log2_32(PossibleLZ) + 1; 9231 Known.Zero.setBitsFrom(LowBits); 9232 break; 9233 } 9234 case RISCVISD::GREV: 9235 case RISCVISD::GORC: { 9236 if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 9237 Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1); 9238 unsigned ShAmt = C->getZExtValue() & (Known.getBitWidth() - 1); 9239 bool IsGORC = Op.getOpcode() == RISCVISD::GORC; 9240 // To compute zeros, we need to invert the value and invert it back after. 9241 Known.Zero = 9242 ~computeGREVOrGORC(~Known.Zero.getZExtValue(), ShAmt, IsGORC); 9243 Known.One = computeGREVOrGORC(Known.One.getZExtValue(), ShAmt, IsGORC); 9244 } 9245 break; 9246 } 9247 case RISCVISD::READ_VLENB: { 9248 // If we know the minimum VLen from Zvl extensions, we can use that to 9249 // determine the trailing zeros of VLENB. 9250 // FIXME: Limit to 128 bit vectors until we have more testing. 9251 unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8; 9252 if (MinVLenB > 0) 9253 Known.Zero.setLowBits(Log2_32(MinVLenB)); 9254 // We assume VLENB is no more than 65536 / 8 bytes. 9255 Known.Zero.setBitsFrom(14); 9256 break; 9257 } 9258 case ISD::INTRINSIC_W_CHAIN: 9259 case ISD::INTRINSIC_WO_CHAIN: { 9260 unsigned IntNo = 9261 Op.getConstantOperandVal(Opc == ISD::INTRINSIC_WO_CHAIN ? 0 : 1); 9262 switch (IntNo) { 9263 default: 9264 // We can't do anything for most intrinsics. 9265 break; 9266 case Intrinsic::riscv_vsetvli: 9267 case Intrinsic::riscv_vsetvlimax: 9268 case Intrinsic::riscv_vsetvli_opt: 9269 case Intrinsic::riscv_vsetvlimax_opt: 9270 // Assume that VL output is positive and would fit in an int32_t. 9271 // TODO: VLEN might be capped at 16 bits in a future V spec update. 
9272 if (BitWidth >= 32) 9273 Known.Zero.setBitsFrom(31); 9274 break; 9275 } 9276 break; 9277 } 9278 } 9279 } 9280 9281 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode( 9282 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 9283 unsigned Depth) const { 9284 switch (Op.getOpcode()) { 9285 default: 9286 break; 9287 case RISCVISD::SELECT_CC: { 9288 unsigned Tmp = 9289 DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1); 9290 if (Tmp == 1) return 1; // Early out. 9291 unsigned Tmp2 = 9292 DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1); 9293 return std::min(Tmp, Tmp2); 9294 } 9295 case RISCVISD::SLLW: 9296 case RISCVISD::SRAW: 9297 case RISCVISD::SRLW: 9298 case RISCVISD::DIVW: 9299 case RISCVISD::DIVUW: 9300 case RISCVISD::REMUW: 9301 case RISCVISD::ROLW: 9302 case RISCVISD::RORW: 9303 case RISCVISD::GREVW: 9304 case RISCVISD::GORCW: 9305 case RISCVISD::FSLW: 9306 case RISCVISD::FSRW: 9307 case RISCVISD::SHFLW: 9308 case RISCVISD::UNSHFLW: 9309 case RISCVISD::BCOMPRESSW: 9310 case RISCVISD::BDECOMPRESSW: 9311 case RISCVISD::BFPW: 9312 case RISCVISD::FCVT_W_RV64: 9313 case RISCVISD::FCVT_WU_RV64: 9314 case RISCVISD::STRICT_FCVT_W_RV64: 9315 case RISCVISD::STRICT_FCVT_WU_RV64: 9316 // TODO: As the result is sign-extended, this is conservatively correct. A 9317 // more precise answer could be calculated for SRAW depending on known 9318 // bits in the shift amount. 9319 return 33; 9320 case RISCVISD::SHFL: 9321 case RISCVISD::UNSHFL: { 9322 // There is no SHFLIW, but a i64 SHFLI with bit 4 of the control word 9323 // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but 9324 // will stay within the upper 32 bits. If there were more than 32 sign bits 9325 // before there will be at least 33 sign bits after. 9326 if (Op.getValueType() == MVT::i64 && 9327 isa<ConstantSDNode>(Op.getOperand(1)) && 9328 (Op.getConstantOperandVal(1) & 0x10) == 0) { 9329 unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1); 9330 if (Tmp > 32) 9331 return 33; 9332 } 9333 break; 9334 } 9335 case RISCVISD::VMV_X_S: { 9336 // The number of sign bits of the scalar result is computed by obtaining the 9337 // element type of the input vector operand, subtracting its width from the 9338 // XLEN, and then adding one (sign bit within the element type). If the 9339 // element type is wider than XLen, the least-significant XLEN bits are 9340 // taken. 9341 unsigned XLen = Subtarget.getXLen(); 9342 unsigned EltBits = Op.getOperand(0).getScalarValueSizeInBits(); 9343 if (EltBits <= XLen) 9344 return XLen - EltBits + 1; 9345 break; 9346 } 9347 } 9348 9349 return 1; 9350 } 9351 9352 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI, 9353 MachineBasicBlock *BB) { 9354 assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction"); 9355 9356 // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves. 9357 // Should the count have wrapped while it was being read, we need to try 9358 // again. 9359 // ... 9360 // read: 9361 // rdcycleh x3 # load high word of cycle 9362 // rdcycle x2 # load low word of cycle 9363 // rdcycleh x4 # load high word of cycle 9364 // bne x3, x4, read # check if high word reads match, otherwise try again 9365 // ... 
9366 9367 MachineFunction &MF = *BB->getParent(); 9368 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9369 MachineFunction::iterator It = ++BB->getIterator(); 9370 9371 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB); 9372 MF.insert(It, LoopMBB); 9373 9374 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB); 9375 MF.insert(It, DoneMBB); 9376 9377 // Transfer the remainder of BB and its successor edges to DoneMBB. 9378 DoneMBB->splice(DoneMBB->begin(), BB, 9379 std::next(MachineBasicBlock::iterator(MI)), BB->end()); 9380 DoneMBB->transferSuccessorsAndUpdatePHIs(BB); 9381 9382 BB->addSuccessor(LoopMBB); 9383 9384 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 9385 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass); 9386 Register LoReg = MI.getOperand(0).getReg(); 9387 Register HiReg = MI.getOperand(1).getReg(); 9388 DebugLoc DL = MI.getDebugLoc(); 9389 9390 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo(); 9391 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg) 9392 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 9393 .addReg(RISCV::X0); 9394 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg) 9395 .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding) 9396 .addReg(RISCV::X0); 9397 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg) 9398 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding) 9399 .addReg(RISCV::X0); 9400 9401 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE)) 9402 .addReg(HiReg) 9403 .addReg(ReadAgainReg) 9404 .addMBB(LoopMBB); 9405 9406 LoopMBB->addSuccessor(LoopMBB); 9407 LoopMBB->addSuccessor(DoneMBB); 9408 9409 MI.eraseFromParent(); 9410 9411 return DoneMBB; 9412 } 9413 9414 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI, 9415 MachineBasicBlock *BB) { 9416 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction"); 9417 9418 MachineFunction &MF = *BB->getParent(); 9419 DebugLoc DL = MI.getDebugLoc(); 9420 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 9421 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 9422 Register LoReg = MI.getOperand(0).getReg(); 9423 Register HiReg = MI.getOperand(1).getReg(); 9424 Register SrcReg = MI.getOperand(2).getReg(); 9425 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass; 9426 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 9427 9428 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC, 9429 RI); 9430 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 9431 MachineMemOperand *MMOLo = 9432 MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8)); 9433 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 9434 MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8)); 9435 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg) 9436 .addFrameIndex(FI) 9437 .addImm(0) 9438 .addMemOperand(MMOLo); 9439 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg) 9440 .addFrameIndex(FI) 9441 .addImm(4) 9442 .addMemOperand(MMOHi); 9443 MI.eraseFromParent(); // The pseudo instruction is gone now. 
9444 return BB; 9445 } 9446 9447 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI, 9448 MachineBasicBlock *BB) { 9449 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo && 9450 "Unexpected instruction"); 9451 9452 MachineFunction &MF = *BB->getParent(); 9453 DebugLoc DL = MI.getDebugLoc(); 9454 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo(); 9455 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo(); 9456 Register DstReg = MI.getOperand(0).getReg(); 9457 Register LoReg = MI.getOperand(1).getReg(); 9458 Register HiReg = MI.getOperand(2).getReg(); 9459 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass; 9460 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF); 9461 9462 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI); 9463 MachineMemOperand *MMOLo = 9464 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8)); 9465 MachineMemOperand *MMOHi = MF.getMachineMemOperand( 9466 MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8)); 9467 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 9468 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill())) 9469 .addFrameIndex(FI) 9470 .addImm(0) 9471 .addMemOperand(MMOLo); 9472 BuildMI(*BB, MI, DL, TII.get(RISCV::SW)) 9473 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill())) 9474 .addFrameIndex(FI) 9475 .addImm(4) 9476 .addMemOperand(MMOHi); 9477 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI); 9478 MI.eraseFromParent(); // The pseudo instruction is gone now. 9479 return BB; 9480 } 9481 9482 static bool isSelectPseudo(MachineInstr &MI) { 9483 switch (MI.getOpcode()) { 9484 default: 9485 return false; 9486 case RISCV::Select_GPR_Using_CC_GPR: 9487 case RISCV::Select_FPR16_Using_CC_GPR: 9488 case RISCV::Select_FPR32_Using_CC_GPR: 9489 case RISCV::Select_FPR64_Using_CC_GPR: 9490 return true; 9491 } 9492 } 9493 9494 static MachineBasicBlock *emitQuietFCMP(MachineInstr &MI, MachineBasicBlock *BB, 9495 unsigned RelOpcode, unsigned EqOpcode, 9496 const RISCVSubtarget &Subtarget) { 9497 DebugLoc DL = MI.getDebugLoc(); 9498 Register DstReg = MI.getOperand(0).getReg(); 9499 Register Src1Reg = MI.getOperand(1).getReg(); 9500 Register Src2Reg = MI.getOperand(2).getReg(); 9501 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo(); 9502 Register SavedFFlags = MRI.createVirtualRegister(&RISCV::GPRRegClass); 9503 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo(); 9504 9505 // Save the current FFLAGS. 9506 BuildMI(*BB, MI, DL, TII.get(RISCV::ReadFFLAGS), SavedFFlags); 9507 9508 auto MIB = BuildMI(*BB, MI, DL, TII.get(RelOpcode), DstReg) 9509 .addReg(Src1Reg) 9510 .addReg(Src2Reg); 9511 if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept)) 9512 MIB->setFlag(MachineInstr::MIFlag::NoFPExcept); 9513 9514 // Restore the FFLAGS. 9515 BuildMI(*BB, MI, DL, TII.get(RISCV::WriteFFLAGS)) 9516 .addReg(SavedFFlags, RegState::Kill); 9517 9518 // Issue a dummy FEQ opcode to raise exception for signaling NaNs. 9519 auto MIB2 = BuildMI(*BB, MI, DL, TII.get(EqOpcode), RISCV::X0) 9520 .addReg(Src1Reg, getKillRegState(MI.getOperand(1).isKill())) 9521 .addReg(Src2Reg, getKillRegState(MI.getOperand(2).isKill())); 9522 if (MI.getFlag(MachineInstr::MIFlag::NoFPExcept)) 9523 MIB2->setFlag(MachineInstr::MIFlag::NoFPExcept); 9524 9525 // Erase the pseudoinstruction. 
9526 MI.eraseFromParent(); 9527 return BB; 9528 } 9529 9530 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI, 9531 MachineBasicBlock *BB, 9532 const RISCVSubtarget &Subtarget) { 9533 // To "insert" Select_* instructions, we actually have to insert the triangle 9534 // control-flow pattern. The incoming instructions know the destination vreg 9535 // to set, the condition code register to branch on, the true/false values to 9536 // select between, and the condcode to use to select the appropriate branch. 9537 // 9538 // We produce the following control flow: 9539 // HeadMBB 9540 // | \ 9541 // | IfFalseMBB 9542 // | / 9543 // TailMBB 9544 // 9545 // When we find a sequence of selects we attempt to optimize their emission 9546 // by sharing the control flow. Currently we only handle cases where we have 9547 // multiple selects with the exact same condition (same LHS, RHS and CC). 9548 // The selects may be interleaved with other instructions if the other 9549 // instructions meet some requirements we deem safe: 9550 // - They are debug instructions. Otherwise, 9551 // - They do not have side-effects, do not access memory and their inputs do 9552 // not depend on the results of the select pseudo-instructions. 9553 // The TrueV/FalseV operands of the selects cannot depend on the result of 9554 // previous selects in the sequence. 9555 // These conditions could be further relaxed. See the X86 target for a 9556 // related approach and more information. 9557 Register LHS = MI.getOperand(1).getReg(); 9558 Register RHS = MI.getOperand(2).getReg(); 9559 auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm()); 9560 9561 SmallVector<MachineInstr *, 4> SelectDebugValues; 9562 SmallSet<Register, 4> SelectDests; 9563 SelectDests.insert(MI.getOperand(0).getReg()); 9564 9565 MachineInstr *LastSelectPseudo = &MI; 9566 9567 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI); 9568 SequenceMBBI != E; ++SequenceMBBI) { 9569 if (SequenceMBBI->isDebugInstr()) 9570 continue; 9571 if (isSelectPseudo(*SequenceMBBI)) { 9572 if (SequenceMBBI->getOperand(1).getReg() != LHS || 9573 SequenceMBBI->getOperand(2).getReg() != RHS || 9574 SequenceMBBI->getOperand(3).getImm() != CC || 9575 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) || 9576 SelectDests.count(SequenceMBBI->getOperand(5).getReg())) 9577 break; 9578 LastSelectPseudo = &*SequenceMBBI; 9579 SequenceMBBI->collectDebugValues(SelectDebugValues); 9580 SelectDests.insert(SequenceMBBI->getOperand(0).getReg()); 9581 } else { 9582 if (SequenceMBBI->hasUnmodeledSideEffects() || 9583 SequenceMBBI->mayLoadOrStore()) 9584 break; 9585 if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) { 9586 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg()); 9587 })) 9588 break; 9589 } 9590 } 9591 9592 const RISCVInstrInfo &TII = *Subtarget.getInstrInfo(); 9593 const BasicBlock *LLVM_BB = BB->getBasicBlock(); 9594 DebugLoc DL = MI.getDebugLoc(); 9595 MachineFunction::iterator I = ++BB->getIterator(); 9596 9597 MachineBasicBlock *HeadMBB = BB; 9598 MachineFunction *F = BB->getParent(); 9599 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB); 9600 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB); 9601 9602 F->insert(I, IfFalseMBB); 9603 F->insert(I, TailMBB); 9604 9605 // Transfer debug instructions associated with the selects to TailMBB. 
  for (MachineInstr *DebugInstr : SelectDebugValues) {
    TailMBB->push_back(DebugInstr->removeFromParent());
  }

  // Move all instructions after the sequence to TailMBB.
  TailMBB->splice(TailMBB->end(), HeadMBB,
                  std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi nodes for the selects.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  BuildMI(HeadMBB, DL, TII.getBrCond(CC))
      .addReg(LHS)
      .addReg(RHS)
      .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // Create PHIs for all of the select pseudo-instructions.
  auto SelectMBBI = MI.getIterator();
  auto SelectEnd = std::next(LastSelectPseudo->getIterator());
  auto InsertionPoint = TailMBB->begin();
  while (SelectMBBI != SelectEnd) {
    auto Next = std::next(SelectMBBI);
    if (isSelectPseudo(*SelectMBBI)) {
      // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
      BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
              TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
          .addReg(SelectMBBI->getOperand(4).getReg())
          .addMBB(HeadMBB)
          .addReg(SelectMBBI->getOperand(5).getReg())
          .addMBB(IfFalseMBB);
      SelectMBBI->eraseFromParent();
    }
    SelectMBBI = Next;
  }

  F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
  return TailMBB;
}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case RISCV::ReadCycleWide:
    assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
    return emitReadCycleWidePseudo(MI, BB);
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR16_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
    return emitSelectPseudo(MI, BB, Subtarget);
  case RISCV::BuildPairF64Pseudo:
    return emitBuildPairF64Pseudo(MI, BB);
  case RISCV::SplitF64Pseudo:
    return emitSplitF64Pseudo(MI, BB);
  case RISCV::PseudoQuietFLE_H:
    return emitQuietFCMP(MI, BB, RISCV::FLE_H, RISCV::FEQ_H, Subtarget);
  case RISCV::PseudoQuietFLT_H:
    return emitQuietFCMP(MI, BB, RISCV::FLT_H, RISCV::FEQ_H, Subtarget);
  case RISCV::PseudoQuietFLE_S:
    return emitQuietFCMP(MI, BB, RISCV::FLE_S, RISCV::FEQ_S, Subtarget);
  case RISCV::PseudoQuietFLT_S:
    return emitQuietFCMP(MI, BB, RISCV::FLT_S, RISCV::FEQ_S, Subtarget);
  case RISCV::PseudoQuietFLE_D:
    return emitQuietFCMP(MI, BB, RISCV::FLE_D, RISCV::FEQ_D, Subtarget);
  case RISCV::PseudoQuietFLT_D:
    return emitQuietFCMP(MI, BB, RISCV::FLT_D, RISCV::FEQ_D, Subtarget);
  }
}

void RISCVTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
                                                        SDNode *Node) const {
  // Add FRM dependency to any instructions with dynamic rounding mode.
9689 unsigned Opc = MI.getOpcode(); 9690 auto Idx = RISCV::getNamedOperandIdx(Opc, RISCV::OpName::frm); 9691 if (Idx < 0) 9692 return; 9693 if (MI.getOperand(Idx).getImm() != RISCVFPRndMode::DYN) 9694 return; 9695 // If the instruction already reads FRM, don't add another read. 9696 if (MI.readsRegister(RISCV::FRM)) 9697 return; 9698 MI.addOperand( 9699 MachineOperand::CreateReg(RISCV::FRM, /*isDef*/ false, /*isImp*/ true)); 9700 } 9701 9702 // Calling Convention Implementation. 9703 // The expectations for frontend ABI lowering vary from target to target. 9704 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI 9705 // details, but this is a longer term goal. For now, we simply try to keep the 9706 // role of the frontend as simple and well-defined as possible. The rules can 9707 // be summarised as: 9708 // * Never split up large scalar arguments. We handle them here. 9709 // * If a hardfloat calling convention is being used, and the struct may be 9710 // passed in a pair of registers (fp+fp, int+fp), and both registers are 9711 // available, then pass as two separate arguments. If either the GPRs or FPRs 9712 // are exhausted, then pass according to the rule below. 9713 // * If a struct could never be passed in registers or directly in a stack 9714 // slot (as it is larger than 2*XLEN and the floating point rules don't 9715 // apply), then pass it using a pointer with the byval attribute. 9716 // * If a struct is less than 2*XLEN, then coerce to either a two-element 9717 // word-sized array or a 2*XLEN scalar (depending on alignment). 9718 // * The frontend can determine whether a struct is returned by reference or 9719 // not based on its size and fields. If it will be returned by reference, the 9720 // frontend must modify the prototype so a pointer with the sret annotation is 9721 // passed as the first argument. This is not necessary for large scalar 9722 // returns. 9723 // * Struct return values and varargs should be coerced to structs containing 9724 // register-size fields in the same situations they would be for fixed 9725 // arguments. 9726 9727 static const MCPhysReg ArgGPRs[] = { 9728 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, 9729 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17 9730 }; 9731 static const MCPhysReg ArgFPR16s[] = { 9732 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, 9733 RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H 9734 }; 9735 static const MCPhysReg ArgFPR32s[] = { 9736 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, 9737 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F 9738 }; 9739 static const MCPhysReg ArgFPR64s[] = { 9740 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, 9741 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D 9742 }; 9743 // This is an interim calling convention and it may be changed in the future. 
9744 static const MCPhysReg ArgVRs[] = { 9745 RISCV::V8, RISCV::V9, RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13, 9746 RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19, 9747 RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23}; 9748 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2, RISCV::V10M2, RISCV::V12M2, 9749 RISCV::V14M2, RISCV::V16M2, RISCV::V18M2, 9750 RISCV::V20M2, RISCV::V22M2}; 9751 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4, 9752 RISCV::V20M4}; 9753 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8}; 9754 9755 // Pass a 2*XLEN argument that has been split into two XLEN values through 9756 // registers or the stack as necessary. 9757 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1, 9758 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2, 9759 MVT ValVT2, MVT LocVT2, 9760 ISD::ArgFlagsTy ArgFlags2) { 9761 unsigned XLenInBytes = XLen / 8; 9762 if (Register Reg = State.AllocateReg(ArgGPRs)) { 9763 // At least one half can be passed via register. 9764 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg, 9765 VA1.getLocVT(), CCValAssign::Full)); 9766 } else { 9767 // Both halves must be passed on the stack, with proper alignment. 9768 Align StackAlign = 9769 std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign()); 9770 State.addLoc( 9771 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(), 9772 State.AllocateStack(XLenInBytes, StackAlign), 9773 VA1.getLocVT(), CCValAssign::Full)); 9774 State.addLoc(CCValAssign::getMem( 9775 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 9776 LocVT2, CCValAssign::Full)); 9777 return false; 9778 } 9779 9780 if (Register Reg = State.AllocateReg(ArgGPRs)) { 9781 // The second half can also be passed via register. 9782 State.addLoc( 9783 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full)); 9784 } else { 9785 // The second half is passed via the stack, without additional alignment. 9786 State.addLoc(CCValAssign::getMem( 9787 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)), 9788 LocVT2, CCValAssign::Full)); 9789 } 9790 9791 return false; 9792 } 9793 9794 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo, 9795 Optional<unsigned> FirstMaskArgument, 9796 CCState &State, const RISCVTargetLowering &TLI) { 9797 const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT); 9798 if (RC == &RISCV::VRRegClass) { 9799 // Assign the first mask argument to V0. 9800 // This is an interim calling convention and it may be changed in the 9801 // future. 9802 if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue()) 9803 return State.AllocateReg(RISCV::V0); 9804 return State.AllocateReg(ArgVRs); 9805 } 9806 if (RC == &RISCV::VRM2RegClass) 9807 return State.AllocateReg(ArgVRM2s); 9808 if (RC == &RISCV::VRM4RegClass) 9809 return State.AllocateReg(ArgVRM4s); 9810 if (RC == &RISCV::VRM8RegClass) 9811 return State.AllocateReg(ArgVRM8s); 9812 llvm_unreachable("Unhandled register class for ValueType"); 9813 } 9814 9815 // Implements the RISC-V calling convention. Returns true upon failure. 
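// Unlike a TableGen-generated calling convention, this function is invoked
// manually, which lets it receive extra context such as IsFixed, IsRet and
// OrigTy.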
static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                     MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                     ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                     bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
                     Optional<unsigned> FirstMaskArgument) {
  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;

  // Any return value split into more than two values can't be returned
  // directly. Vectors are returned via the available vector registers.
  if (!LocVT.isVector() && IsRet && ValNo > 1)
    return true;

  // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
  // variadic argument, or if no F16/F32 argument registers are available.
  bool UseGPRForF16_F32 = true;
  // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
  // variadic argument, or if no F64 argument registers are available.
  bool UseGPRForF64 = true;

  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF16_F32 = !IsFixed;
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF16_F32 = !IsFixed;
    UseGPRForF64 = !IsFixed;
    break;
  }

  // FPR16, FPR32, and FPR64 alias each other.
  if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
    UseGPRForF16_F32 = true;
    UseGPRForF64 = true;
  }

  // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
  // similar local variables rather than directly checking against the target
  // ABI.

  if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::BCvt;
  } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
    LocVT = MVT::i64;
    LocInfo = CCValAssign::BCvt;
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
9897 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) { 9898 assert(!ArgFlags.isSplit() && PendingLocs.empty() && 9899 "Can't lower f64 if it is split"); 9900 // Depending on available argument GPRS, f64 may be passed in a pair of 9901 // GPRs, split between a GPR and the stack, or passed completely on the 9902 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these 9903 // cases. 9904 Register Reg = State.AllocateReg(ArgGPRs); 9905 LocVT = MVT::i32; 9906 if (!Reg) { 9907 unsigned StackOffset = State.AllocateStack(8, Align(8)); 9908 State.addLoc( 9909 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 9910 return false; 9911 } 9912 if (!State.AllocateReg(ArgGPRs)) 9913 State.AllocateStack(4, Align(4)); 9914 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 9915 return false; 9916 } 9917 9918 // Fixed-length vectors are located in the corresponding scalable-vector 9919 // container types. 9920 if (ValVT.isFixedLengthVector()) 9921 LocVT = TLI.getContainerForFixedLengthVector(LocVT); 9922 9923 // Split arguments might be passed indirectly, so keep track of the pending 9924 // values. Split vectors are passed via a mix of registers and indirectly, so 9925 // treat them as we would any other argument. 9926 if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) { 9927 LocVT = XLenVT; 9928 LocInfo = CCValAssign::Indirect; 9929 PendingLocs.push_back( 9930 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo)); 9931 PendingArgFlags.push_back(ArgFlags); 9932 if (!ArgFlags.isSplitEnd()) { 9933 return false; 9934 } 9935 } 9936 9937 // If the split argument only had two elements, it should be passed directly 9938 // in registers or on the stack. 9939 if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() && 9940 PendingLocs.size() <= 2) { 9941 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()"); 9942 // Apply the normal calling convention rules to the first half of the 9943 // split argument. 9944 CCValAssign VA = PendingLocs[0]; 9945 ISD::ArgFlagsTy AF = PendingArgFlags[0]; 9946 PendingLocs.clear(); 9947 PendingArgFlags.clear(); 9948 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT, 9949 ArgFlags); 9950 } 9951 9952 // Allocate to a register if possible, or else a stack slot. 9953 Register Reg; 9954 unsigned StoreSizeBytes = XLen / 8; 9955 Align StackAlign = Align(XLen / 8); 9956 9957 if (ValVT == MVT::f16 && !UseGPRForF16_F32) 9958 Reg = State.AllocateReg(ArgFPR16s); 9959 else if (ValVT == MVT::f32 && !UseGPRForF16_F32) 9960 Reg = State.AllocateReg(ArgFPR32s); 9961 else if (ValVT == MVT::f64 && !UseGPRForF64) 9962 Reg = State.AllocateReg(ArgFPR64s); 9963 else if (ValVT.isVector()) { 9964 Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI); 9965 if (!Reg) { 9966 // For return values, the vector must be passed fully via registers or 9967 // via the stack. 9968 // FIXME: The proposed vector ABI only mandates v8-v15 for return values, 9969 // but we're using all of them. 9970 if (IsRet) 9971 return true; 9972 // Try using a GPR to pass the address 9973 if ((Reg = State.AllocateReg(ArgGPRs))) { 9974 LocVT = XLenVT; 9975 LocInfo = CCValAssign::Indirect; 9976 } else if (ValVT.isScalableVector()) { 9977 LocVT = XLenVT; 9978 LocInfo = CCValAssign::Indirect; 9979 } else { 9980 // Pass fixed-length vectors on the stack. 9981 LocVT = ValVT; 9982 StoreSizeBytes = ValVT.getStoreSize(); 9983 // Align vectors to their element sizes, being careful for vXi1 9984 // vectors. 
9985 StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne(); 9986 } 9987 } 9988 } else { 9989 Reg = State.AllocateReg(ArgGPRs); 9990 } 9991 9992 unsigned StackOffset = 9993 Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign); 9994 9995 // If we reach this point and PendingLocs is non-empty, we must be at the 9996 // end of a split argument that must be passed indirectly. 9997 if (!PendingLocs.empty()) { 9998 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()"); 9999 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()"); 10000 10001 for (auto &It : PendingLocs) { 10002 if (Reg) 10003 It.convertToReg(Reg); 10004 else 10005 It.convertToMem(StackOffset); 10006 State.addLoc(It); 10007 } 10008 PendingLocs.clear(); 10009 PendingArgFlags.clear(); 10010 return false; 10011 } 10012 10013 assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT || 10014 (TLI.getSubtarget().hasVInstructions() && ValVT.isVector())) && 10015 "Expected an XLenVT or vector types at this stage"); 10016 10017 if (Reg) { 10018 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 10019 return false; 10020 } 10021 10022 // When a floating-point value is passed on the stack, no bit-conversion is 10023 // needed. 10024 if (ValVT.isFloatingPoint()) { 10025 LocVT = ValVT; 10026 LocInfo = CCValAssign::Full; 10027 } 10028 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 10029 return false; 10030 } 10031 10032 template <typename ArgTy> 10033 static Optional<unsigned> preAssignMask(const ArgTy &Args) { 10034 for (const auto &ArgIdx : enumerate(Args)) { 10035 MVT ArgVT = ArgIdx.value().VT; 10036 if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1) 10037 return ArgIdx.index(); 10038 } 10039 return None; 10040 } 10041 10042 void RISCVTargetLowering::analyzeInputArgs( 10043 MachineFunction &MF, CCState &CCInfo, 10044 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet, 10045 RISCVCCAssignFn Fn) const { 10046 unsigned NumArgs = Ins.size(); 10047 FunctionType *FType = MF.getFunction().getFunctionType(); 10048 10049 Optional<unsigned> FirstMaskArgument; 10050 if (Subtarget.hasVInstructions()) 10051 FirstMaskArgument = preAssignMask(Ins); 10052 10053 for (unsigned i = 0; i != NumArgs; ++i) { 10054 MVT ArgVT = Ins[i].VT; 10055 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags; 10056 10057 Type *ArgTy = nullptr; 10058 if (IsRet) 10059 ArgTy = FType->getReturnType(); 10060 else if (Ins[i].isOrigArg()) 10061 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex()); 10062 10063 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 10064 if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 10065 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this, 10066 FirstMaskArgument)) { 10067 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type " 10068 << EVT(ArgVT).getEVTString() << '\n'); 10069 llvm_unreachable(nullptr); 10070 } 10071 } 10072 } 10073 10074 void RISCVTargetLowering::analyzeOutputArgs( 10075 MachineFunction &MF, CCState &CCInfo, 10076 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet, 10077 CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const { 10078 unsigned NumArgs = Outs.size(); 10079 10080 Optional<unsigned> FirstMaskArgument; 10081 if (Subtarget.hasVInstructions()) 10082 FirstMaskArgument = preAssignMask(Outs); 10083 10084 for (unsigned i = 0; i != NumArgs; i++) { 10085 MVT ArgVT = Outs[i].VT; 10086 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 10087 Type *OrigTy = CLI ? 
CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr; 10088 10089 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 10090 if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full, 10091 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this, 10092 FirstMaskArgument)) { 10093 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type " 10094 << EVT(ArgVT).getEVTString() << "\n"); 10095 llvm_unreachable(nullptr); 10096 } 10097 } 10098 } 10099 10100 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect 10101 // values. 10102 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val, 10103 const CCValAssign &VA, const SDLoc &DL, 10104 const RISCVSubtarget &Subtarget) { 10105 switch (VA.getLocInfo()) { 10106 default: 10107 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 10108 case CCValAssign::Full: 10109 if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector()) 10110 Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget); 10111 break; 10112 case CCValAssign::BCvt: 10113 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 10114 Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val); 10115 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 10116 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val); 10117 else 10118 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val); 10119 break; 10120 } 10121 return Val; 10122 } 10123 10124 // The caller is responsible for loading the full value if the argument is 10125 // passed with CCValAssign::Indirect. 10126 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain, 10127 const CCValAssign &VA, const SDLoc &DL, 10128 const RISCVTargetLowering &TLI) { 10129 MachineFunction &MF = DAG.getMachineFunction(); 10130 MachineRegisterInfo &RegInfo = MF.getRegInfo(); 10131 EVT LocVT = VA.getLocVT(); 10132 SDValue Val; 10133 const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT()); 10134 Register VReg = RegInfo.createVirtualRegister(RC); 10135 RegInfo.addLiveIn(VA.getLocReg(), VReg); 10136 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT); 10137 10138 if (VA.getLocInfo() == CCValAssign::Indirect) 10139 return Val; 10140 10141 return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget()); 10142 } 10143 10144 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val, 10145 const CCValAssign &VA, const SDLoc &DL, 10146 const RISCVSubtarget &Subtarget) { 10147 EVT LocVT = VA.getLocVT(); 10148 10149 switch (VA.getLocInfo()) { 10150 default: 10151 llvm_unreachable("Unexpected CCValAssign::LocInfo"); 10152 case CCValAssign::Full: 10153 if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector()) 10154 Val = convertToScalableVector(LocVT, Val, DAG, Subtarget); 10155 break; 10156 case CCValAssign::BCvt: 10157 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16) 10158 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val); 10159 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) 10160 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val); 10161 else 10162 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val); 10163 break; 10164 } 10165 return Val; 10166 } 10167 10168 // The caller is responsible for loading the full value if the argument is 10169 // passed with CCValAssign::Indirect. 
static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
                                const CCValAssign &VA, const SDLoc &DL) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT LocVT = VA.getLocVT();
  EVT ValVT = VA.getValVT();
  EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
  if (ValVT.isScalableVector()) {
    // When the value is a scalable vector, the argument slot holds a pointer
    // to the actual vector value on the stack, so ValVT here is the pointer
    // type rather than the scalable vector type.
    ValVT = LocVT;
  }
  int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
                                 /*IsImmutable=*/true);
  SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
  SDValue Val;

  ISD::LoadExtType ExtType;
  switch (VA.getLocInfo()) {
  default:
    llvm_unreachable("Unexpected CCValAssign::LocInfo");
  case CCValAssign::Full:
  case CCValAssign::Indirect:
  case CCValAssign::BCvt:
    ExtType = ISD::NON_EXTLOAD;
    break;
  }
  Val = DAG.getExtLoad(
      ExtType, DL, LocVT, Chain, FIN,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
  return Val;
}

static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
                                       const CCValAssign &VA, const SDLoc &DL) {
  assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
         "Unexpected VA");
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  if (VA.isMemLoc()) {
    // f64 is passed on the stack.
    int FI =
        MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*IsImmutable=*/true);
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    return DAG.getLoad(MVT::f64, DL, Chain, FIN,
                       MachinePointerInfo::getFixedStack(MF, FI));
  }

  assert(VA.isRegLoc() && "Expected register VA assignment");

  Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
  SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
  SDValue Hi;
  if (VA.getLocReg() == RISCV::X17) {
    // Second half of f64 is passed on the stack.
    int FI = MFI.CreateFixedObject(4, 0, /*IsImmutable=*/true);
    SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
    Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
                     MachinePointerInfo::getFixedStack(MF, FI));
  } else {
    // Second half of f64 is passed in another GPR.
    Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
    RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
    Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
  }
  return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
}

// FastCC showed less than a 1% performance improvement on the benchmarks
// measured, but it may theoretically benefit some cases.
static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
                            unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State,
                            bool IsFixed, bool IsRet, Type *OrigTy,
                            const RISCVTargetLowering &TLI,
                            Optional<unsigned> FirstMaskArgument) {

  // X5 and X6 might be used for save-restore libcall.
10253 static const MCPhysReg GPRList[] = { 10254 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14, 10255 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28, 10256 RISCV::X29, RISCV::X30, RISCV::X31}; 10257 10258 if (LocVT == MVT::i32 || LocVT == MVT::i64) { 10259 if (unsigned Reg = State.AllocateReg(GPRList)) { 10260 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 10261 return false; 10262 } 10263 } 10264 10265 if (LocVT == MVT::f16) { 10266 static const MCPhysReg FPR16List[] = { 10267 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H, 10268 RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H, 10269 RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H, 10270 RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H}; 10271 if (unsigned Reg = State.AllocateReg(FPR16List)) { 10272 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 10273 return false; 10274 } 10275 } 10276 10277 if (LocVT == MVT::f32) { 10278 static const MCPhysReg FPR32List[] = { 10279 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F, 10280 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F, 10281 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F, 10282 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F}; 10283 if (unsigned Reg = State.AllocateReg(FPR32List)) { 10284 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 10285 return false; 10286 } 10287 } 10288 10289 if (LocVT == MVT::f64) { 10290 static const MCPhysReg FPR64List[] = { 10291 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D, 10292 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D, 10293 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D, 10294 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D}; 10295 if (unsigned Reg = State.AllocateReg(FPR64List)) { 10296 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 10297 return false; 10298 } 10299 } 10300 10301 if (LocVT == MVT::i32 || LocVT == MVT::f32) { 10302 unsigned Offset4 = State.AllocateStack(4, Align(4)); 10303 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo)); 10304 return false; 10305 } 10306 10307 if (LocVT == MVT::i64 || LocVT == MVT::f64) { 10308 unsigned Offset5 = State.AllocateStack(8, Align(8)); 10309 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo)); 10310 return false; 10311 } 10312 10313 if (LocVT.isVector()) { 10314 if (unsigned Reg = 10315 allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) { 10316 // Fixed-length vectors are located in the corresponding scalable-vector 10317 // container types. 10318 if (ValVT.isFixedLengthVector()) 10319 LocVT = TLI.getContainerForFixedLengthVector(LocVT); 10320 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 10321 } else { 10322 // Try and pass the address via a "fast" GPR. 
10323 if (unsigned GPRReg = State.AllocateReg(GPRList)) { 10324 LocInfo = CCValAssign::Indirect; 10325 LocVT = TLI.getSubtarget().getXLenVT(); 10326 State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo)); 10327 } else if (ValVT.isFixedLengthVector()) { 10328 auto StackAlign = 10329 MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne(); 10330 unsigned StackOffset = 10331 State.AllocateStack(ValVT.getStoreSize(), StackAlign); 10332 State.addLoc( 10333 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo)); 10334 } else { 10335 // Can't pass scalable vectors on the stack. 10336 return true; 10337 } 10338 } 10339 10340 return false; 10341 } 10342 10343 return true; // CC didn't match. 10344 } 10345 10346 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT, 10347 CCValAssign::LocInfo LocInfo, 10348 ISD::ArgFlagsTy ArgFlags, CCState &State) { 10349 10350 if (LocVT == MVT::i32 || LocVT == MVT::i64) { 10351 // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim 10352 // s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11 10353 static const MCPhysReg GPRList[] = { 10354 RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22, 10355 RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27}; 10356 if (unsigned Reg = State.AllocateReg(GPRList)) { 10357 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 10358 return false; 10359 } 10360 } 10361 10362 if (LocVT == MVT::f32) { 10363 // Pass in STG registers: F1, ..., F6 10364 // fs0 ... fs5 10365 static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F, 10366 RISCV::F18_F, RISCV::F19_F, 10367 RISCV::F20_F, RISCV::F21_F}; 10368 if (unsigned Reg = State.AllocateReg(FPR32List)) { 10369 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 10370 return false; 10371 } 10372 } 10373 10374 if (LocVT == MVT::f64) { 10375 // Pass in STG registers: D1, ..., D6 10376 // fs6 ... fs11 10377 static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D, 10378 RISCV::F24_D, RISCV::F25_D, 10379 RISCV::F26_D, RISCV::F27_D}; 10380 if (unsigned Reg = State.AllocateReg(FPR64List)) { 10381 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo)); 10382 return false; 10383 } 10384 } 10385 10386 report_fatal_error("No registers left in GHC calling convention"); 10387 return true; 10388 } 10389 10390 // Transform physical registers into virtual registers. 
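// For example, an incoming i32 argument assigned to a0 is copied out of the
// physical register with CopyFromReg into a freshly created virtual register,
// and all later uses in the DAG refer to that virtual register instead.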
SDValue RISCVTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  MachineFunction &MF = DAG.getMachineFunction();

  switch (CallConv) {
  default:
    report_fatal_error("Unsupported calling convention");
  case CallingConv::C:
  case CallingConv::Fast:
    break;
  case CallingConv::GHC:
    if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
        !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
      report_fatal_error(
          "GHC calling convention requires the F and D instruction set extensions");
  }

  const Function &Func = MF.getFunction();
  if (Func.hasFnAttribute("interrupt")) {
    if (!Func.arg_empty())
      report_fatal_error(
          "Functions with the interrupt attribute cannot have arguments!");

    StringRef Kind =
        MF.getFunction().getFnAttribute("interrupt").getValueAsString();

    if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
      report_fatal_error(
          "Function interrupt attribute argument not supported!");
  }

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  MVT XLenVT = Subtarget.getXLenVT();
  unsigned XLenInBytes = Subtarget.getXLen() / 8;
  // Used with varargs to accumulate store chains.
  std::vector<SDValue> OutChains;

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (CallConv == CallingConv::GHC)
    CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
  else
    analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
                     CallConv == CallingConv::Fast ? CC_RISCV_FastCC
                                                   : CC_RISCV);

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue ArgValue;
    // Passing f64 on RV32D with a soft float ABI must be handled as a special
    // case.
    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
      ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
    else if (VA.isRegLoc())
      ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
    else
      ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);

    if (VA.getLocInfo() == CCValAssign::Indirect) {
      // If the original argument was split and passed by reference (e.g. i128
      // on RV32), we need to load all parts of it here (using the same
      // address). Vectors may be partly split to registers and partly to the
      // stack, in which case the base address is partly offset and subsequent
      // loads are relative to that.
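      // Illustrative example: an i128 argument on RV32 is passed as a pointer
      // in a single GPR; the first part is loaded directly from that pointer
      // and the loop below loads each remaining part at its PartOffset,
      // scaling the offset by vscale when the part is a scalable vector.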
      InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
                                   MachinePointerInfo()));
      unsigned ArgIndex = Ins[i].OrigArgIndex;
      unsigned ArgPartOffset = Ins[i].PartOffset;
      assert(VA.getValVT().isVector() || ArgPartOffset == 0);
      while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
        CCValAssign &PartVA = ArgLocs[i + 1];
        unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
        SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
        if (PartVA.getValVT().isScalableVector())
          Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
        SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
        InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
                                     MachinePointerInfo()));
        ++i;
      }
      continue;
    }
    InVals.push_back(ArgValue);
  }

  if (IsVarArg) {
    ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
    unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
    const TargetRegisterClass *RC = &RISCV::GPRRegClass;
    MachineFrameInfo &MFI = MF.getFrameInfo();
    MachineRegisterInfo &RegInfo = MF.getRegInfo();
    RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();

    // Offset of the first variable argument from stack pointer, and size of
    // the vararg save area. For now, the varargs save area is either zero or
    // large enough to hold a0-a7.
    int VaArgOffset, VarArgsSaveSize;

    // If all registers are allocated, then all varargs must be passed on the
    // stack and we don't need to save any argregs.
    if (ArgRegs.size() == Idx) {
      VaArgOffset = CCInfo.getNextStackOffset();
      VarArgsSaveSize = 0;
    } else {
      VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
      VaArgOffset = -VarArgsSaveSize;
    }

    // Record the frame index of the first variable argument; it is needed to
    // lower VASTART.
    int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
    RVFI->setVarArgsFrameIndex(FI);

    // If saving an odd number of registers then create an extra stack slot to
    // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
    // offsets to even-numbered registers remain 2*XLEN-aligned.
    if (Idx % 2) {
      MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
      VarArgsSaveSize += XLenInBytes;
    }

    // Copy the integer registers that may have been used for passing varargs
    // to the vararg save area.
    for (unsigned I = Idx; I < ArgRegs.size();
         ++I, VaArgOffset += XLenInBytes) {
      const Register Reg = RegInfo.createVirtualRegister(RC);
      RegInfo.addLiveIn(ArgRegs[I], Reg);
      SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
      FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
      SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
                                   MachinePointerInfo::getFixedStack(MF, FI));
      cast<StoreSDNode>(Store.getNode())
          ->getMemOperand()
          ->setValue((Value *)nullptr);
      OutChains.push_back(Store);
    }
    RVFI->setVarArgsSaveSize(VarArgsSaveSize);
  }

  // All stores are grouped into one node to allow the matching between the
  // sizes of Ins and InVals. This only happens for vararg functions.
  if (!OutChains.empty()) {
    OutChains.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
  }

  return Chain;
}

/// isEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization.
/// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
bool RISCVTargetLowering::isEligibleForTailCallOptimization(
    CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
    const SmallVector<CCValAssign, 16> &ArgLocs) const {

  auto &Callee = CLI.Callee;
  auto CalleeCC = CLI.CallConv;
  auto &Outs = CLI.Outs;
  auto &Caller = MF.getFunction();
  auto CallerCC = Caller.getCallingConv();

  // Exception-handling functions need a special set of instructions to
  // indicate a return to the hardware. Tail-calling another function would
  // probably break this.
  // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
  // should be expanded as new function attributes are introduced.
  if (Caller.hasFnAttribute("interrupt"))
    return false;

  // Do not tail call opt if the stack is used to pass parameters.
  if (CCInfo.getNextStackOffset() != 0)
    return false;

  // Do not tail call opt if any parameters need to be passed indirectly.
  // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
  // passed indirectly: the address of the value is passed in a register, or,
  // if no register is available, on the stack. Passing indirectly usually
  // also requires allocating stack space to hold the value itself, so the
  // CCInfo.getNextStackOffset() != 0 check above is not sufficient; we must
  // also check whether any CCValAssign in ArgLocs is CCValAssign::Indirect.
  for (auto &VA : ArgLocs)
    if (VA.getLocInfo() == CCValAssign::Indirect)
      return false;

  // Do not tail call opt if either caller or callee uses struct return
  // semantics.
  auto IsCallerStructRet = Caller.hasStructRetAttr();
  auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
  if (IsCallerStructRet || IsCalleeStructRet)
    return false;

  // Externally-defined functions with weak linkage should not be
  // tail-called. The behaviour of branch instructions in this situation (as
  // used for tail calls) is implementation-defined, so we cannot rely on the
  // linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    if (GV->hasExternalWeakLinkage())
      return false;
  }

  // The callee has to preserve all registers the caller needs to preserve.
  const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (CalleeCC != CallerCC) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible
  // but less efficient and uglier in LowerCall.
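  // For example, a struct passed byval hands the callee a pointer into the
  // caller's outgoing argument area; a tail call would reuse that same area
  // for its own outgoing arguments and could clobber the struct before the
  // callee has read it.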
10612 for (auto &Arg : Outs) 10613 if (Arg.Flags.isByVal()) 10614 return false; 10615 10616 return true; 10617 } 10618 10619 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) { 10620 return DAG.getDataLayout().getPrefTypeAlign( 10621 VT.getTypeForEVT(*DAG.getContext())); 10622 } 10623 10624 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input 10625 // and output parameter nodes. 10626 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI, 10627 SmallVectorImpl<SDValue> &InVals) const { 10628 SelectionDAG &DAG = CLI.DAG; 10629 SDLoc &DL = CLI.DL; 10630 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs; 10631 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals; 10632 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins; 10633 SDValue Chain = CLI.Chain; 10634 SDValue Callee = CLI.Callee; 10635 bool &IsTailCall = CLI.IsTailCall; 10636 CallingConv::ID CallConv = CLI.CallConv; 10637 bool IsVarArg = CLI.IsVarArg; 10638 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 10639 MVT XLenVT = Subtarget.getXLenVT(); 10640 10641 MachineFunction &MF = DAG.getMachineFunction(); 10642 10643 // Analyze the operands of the call, assigning locations to each operand. 10644 SmallVector<CCValAssign, 16> ArgLocs; 10645 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext()); 10646 10647 if (CallConv == CallingConv::GHC) 10648 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC); 10649 else 10650 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI, 10651 CallConv == CallingConv::Fast ? CC_RISCV_FastCC 10652 : CC_RISCV); 10653 10654 // Check if it's really possible to do a tail call. 10655 if (IsTailCall) 10656 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs); 10657 10658 if (IsTailCall) 10659 ++NumTailCalls; 10660 else if (CLI.CB && CLI.CB->isMustTailCall()) 10661 report_fatal_error("failed to perform tail call elimination on a call " 10662 "site marked musttail"); 10663 10664 // Get a count of how many bytes are to be pushed on the stack. 10665 unsigned NumBytes = ArgCCInfo.getNextStackOffset(); 10666 10667 // Create local copies for byval args 10668 SmallVector<SDValue, 8> ByValArgs; 10669 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 10670 ISD::ArgFlagsTy Flags = Outs[i].Flags; 10671 if (!Flags.isByVal()) 10672 continue; 10673 10674 SDValue Arg = OutVals[i]; 10675 unsigned Size = Flags.getByValSize(); 10676 Align Alignment = Flags.getNonZeroByValAlign(); 10677 10678 int FI = 10679 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false); 10680 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout())); 10681 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT); 10682 10683 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment, 10684 /*IsVolatile=*/false, 10685 /*AlwaysInline=*/false, IsTailCall, 10686 MachinePointerInfo(), MachinePointerInfo()); 10687 ByValArgs.push_back(FIPtr); 10688 } 10689 10690 if (!IsTailCall) 10691 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL); 10692 10693 // Copy argument values to their designated locations. 10694 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass; 10695 SmallVector<SDValue, 8> MemOpChains; 10696 SDValue StackPtr; 10697 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) { 10698 CCValAssign &VA = ArgLocs[i]; 10699 SDValue ArgValue = OutVals[i]; 10700 ISD::ArgFlagsTy Flags = Outs[i].Flags; 10701 10702 // Handle passing f64 on RV32D with a soft float ABI as a special case. 
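    // For example, an f64 argument assigned to a0 is split by SplitF64 into
    // two i32 halves: the low half stays in a0 and the high half goes in a1,
    // unless the low half landed in a7 (X17), in which case the high half is
    // passed on the stack instead.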
10703 bool IsF64OnRV32DSoftABI = 10704 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64; 10705 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) { 10706 SDValue SplitF64 = DAG.getNode( 10707 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue); 10708 SDValue Lo = SplitF64.getValue(0); 10709 SDValue Hi = SplitF64.getValue(1); 10710 10711 Register RegLo = VA.getLocReg(); 10712 RegsToPass.push_back(std::make_pair(RegLo, Lo)); 10713 10714 if (RegLo == RISCV::X17) { 10715 // Second half of f64 is passed on the stack. 10716 // Work out the address of the stack slot. 10717 if (!StackPtr.getNode()) 10718 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 10719 // Emit the store. 10720 MemOpChains.push_back( 10721 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo())); 10722 } else { 10723 // Second half of f64 is passed in another GPR. 10724 assert(RegLo < RISCV::X31 && "Invalid register pair"); 10725 Register RegHigh = RegLo + 1; 10726 RegsToPass.push_back(std::make_pair(RegHigh, Hi)); 10727 } 10728 continue; 10729 } 10730 10731 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way 10732 // as any other MemLoc. 10733 10734 // Promote the value if needed. 10735 // For now, only handle fully promoted and indirect arguments. 10736 if (VA.getLocInfo() == CCValAssign::Indirect) { 10737 // Store the argument in a stack slot and pass its address. 10738 Align StackAlign = 10739 std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG), 10740 getPrefTypeAlign(ArgValue.getValueType(), DAG)); 10741 TypeSize StoredSize = ArgValue.getValueType().getStoreSize(); 10742 // If the original argument was split (e.g. i128), we need 10743 // to store the required parts of it here (and pass just one address). 10744 // Vectors may be partly split to registers and partly to the stack, in 10745 // which case the base address is partly offset and subsequent stores are 10746 // relative to that. 10747 unsigned ArgIndex = Outs[i].OrigArgIndex; 10748 unsigned ArgPartOffset = Outs[i].PartOffset; 10749 assert(VA.getValVT().isVector() || ArgPartOffset == 0); 10750 // Calculate the total size to store. We don't have access to what we're 10751 // actually storing other than performing the loop and collecting the 10752 // info. 
10753 SmallVector<std::pair<SDValue, SDValue>> Parts; 10754 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) { 10755 SDValue PartValue = OutVals[i + 1]; 10756 unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset; 10757 SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL); 10758 EVT PartVT = PartValue.getValueType(); 10759 if (PartVT.isScalableVector()) 10760 Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset); 10761 StoredSize += PartVT.getStoreSize(); 10762 StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG)); 10763 Parts.push_back(std::make_pair(PartValue, Offset)); 10764 ++i; 10765 } 10766 SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign); 10767 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex(); 10768 MemOpChains.push_back( 10769 DAG.getStore(Chain, DL, ArgValue, SpillSlot, 10770 MachinePointerInfo::getFixedStack(MF, FI))); 10771 for (const auto &Part : Parts) { 10772 SDValue PartValue = Part.first; 10773 SDValue PartOffset = Part.second; 10774 SDValue Address = 10775 DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset); 10776 MemOpChains.push_back( 10777 DAG.getStore(Chain, DL, PartValue, Address, 10778 MachinePointerInfo::getFixedStack(MF, FI))); 10779 } 10780 ArgValue = SpillSlot; 10781 } else { 10782 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget); 10783 } 10784 10785 // Use local copy if it is a byval arg. 10786 if (Flags.isByVal()) 10787 ArgValue = ByValArgs[j++]; 10788 10789 if (VA.isRegLoc()) { 10790 // Queue up the argument copies and emit them at the end. 10791 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue)); 10792 } else { 10793 assert(VA.isMemLoc() && "Argument not register or memory"); 10794 assert(!IsTailCall && "Tail call not allowed if stack is used " 10795 "for passing parameters"); 10796 10797 // Work out the address of the stack slot. 10798 if (!StackPtr.getNode()) 10799 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT); 10800 SDValue Address = 10801 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, 10802 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL)); 10803 10804 // Emit the store. 10805 MemOpChains.push_back( 10806 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo())); 10807 } 10808 } 10809 10810 // Join the stores, which are independent of one another. 10811 if (!MemOpChains.empty()) 10812 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains); 10813 10814 SDValue Glue; 10815 10816 // Build a sequence of copy-to-reg nodes, chained and glued together. 10817 for (auto &Reg : RegsToPass) { 10818 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue); 10819 Glue = Chain.getValue(1); 10820 } 10821 10822 // Validate that none of the argument registers have been marked as 10823 // reserved, if so report an error. Do the same for the return address if this 10824 // is not a tailcall. 10825 validateCCReservedRegs(RegsToPass, MF); 10826 if (!IsTailCall && 10827 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1)) 10828 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 10829 MF.getFunction(), 10830 "Return address register required, but has been reserved."}); 10831 10832 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a 10833 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't 10834 // split it and then direct call can be matched by PseudoCALL. 
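  // For example, `call foo` keeps `foo` as a single TargetGlobalAddress with
  // MO_CALL (or MO_PLT when the callee is not known to be DSO-local), which
  // PseudoCALL later expands to an auipc+jalr pair (possibly relaxed to a
  // single jal by the linker).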
10835 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) { 10836 const GlobalValue *GV = S->getGlobal(); 10837 10838 unsigned OpFlags = RISCVII::MO_CALL; 10839 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) 10840 OpFlags = RISCVII::MO_PLT; 10841 10842 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags); 10843 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) { 10844 unsigned OpFlags = RISCVII::MO_CALL; 10845 10846 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(), 10847 nullptr)) 10848 OpFlags = RISCVII::MO_PLT; 10849 10850 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags); 10851 } 10852 10853 // The first call operand is the chain and the second is the target address. 10854 SmallVector<SDValue, 8> Ops; 10855 Ops.push_back(Chain); 10856 Ops.push_back(Callee); 10857 10858 // Add argument registers to the end of the list so that they are 10859 // known live into the call. 10860 for (auto &Reg : RegsToPass) 10861 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType())); 10862 10863 if (!IsTailCall) { 10864 // Add a register mask operand representing the call-preserved registers. 10865 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo(); 10866 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv); 10867 assert(Mask && "Missing call preserved mask for calling convention"); 10868 Ops.push_back(DAG.getRegisterMask(Mask)); 10869 } 10870 10871 // Glue the call to the argument copies, if any. 10872 if (Glue.getNode()) 10873 Ops.push_back(Glue); 10874 10875 // Emit the call. 10876 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); 10877 10878 if (IsTailCall) { 10879 MF.getFrameInfo().setHasTailCall(); 10880 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops); 10881 } 10882 10883 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops); 10884 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge); 10885 Glue = Chain.getValue(1); 10886 10887 // Mark the end of the call, which is glued to the call itself. 10888 Chain = DAG.getCALLSEQ_END(Chain, 10889 DAG.getConstant(NumBytes, DL, PtrVT, true), 10890 DAG.getConstant(0, DL, PtrVT, true), 10891 Glue, DL); 10892 Glue = Chain.getValue(1); 10893 10894 // Assign locations to each value returned by this call. 10895 SmallVector<CCValAssign, 16> RVLocs; 10896 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext()); 10897 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV); 10898 10899 // Copy all of the result registers out of their specified physreg. 
10900 for (auto &VA : RVLocs) { 10901 // Copy the value out 10902 SDValue RetValue = 10903 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue); 10904 // Glue the RetValue to the end of the call sequence 10905 Chain = RetValue.getValue(1); 10906 Glue = RetValue.getValue(2); 10907 10908 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 10909 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment"); 10910 SDValue RetValue2 = 10911 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue); 10912 Chain = RetValue2.getValue(1); 10913 Glue = RetValue2.getValue(2); 10914 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue, 10915 RetValue2); 10916 } 10917 10918 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget); 10919 10920 InVals.push_back(RetValue); 10921 } 10922 10923 return Chain; 10924 } 10925 10926 bool RISCVTargetLowering::CanLowerReturn( 10927 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg, 10928 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const { 10929 SmallVector<CCValAssign, 16> RVLocs; 10930 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context); 10931 10932 Optional<unsigned> FirstMaskArgument; 10933 if (Subtarget.hasVInstructions()) 10934 FirstMaskArgument = preAssignMask(Outs); 10935 10936 for (unsigned i = 0, e = Outs.size(); i != e; ++i) { 10937 MVT VT = Outs[i].VT; 10938 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags; 10939 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI(); 10940 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full, 10941 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr, 10942 *this, FirstMaskArgument)) 10943 return false; 10944 } 10945 return true; 10946 } 10947 10948 SDValue 10949 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv, 10950 bool IsVarArg, 10951 const SmallVectorImpl<ISD::OutputArg> &Outs, 10952 const SmallVectorImpl<SDValue> &OutVals, 10953 const SDLoc &DL, SelectionDAG &DAG) const { 10954 const MachineFunction &MF = DAG.getMachineFunction(); 10955 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 10956 10957 // Stores the assignment of the return value to a location. 10958 SmallVector<CCValAssign, 16> RVLocs; 10959 10960 // Info about the registers and stack slot. 10961 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs, 10962 *DAG.getContext()); 10963 10964 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true, 10965 nullptr, CC_RISCV); 10966 10967 if (CallConv == CallingConv::GHC && !RVLocs.empty()) 10968 report_fatal_error("GHC functions return void only"); 10969 10970 SDValue Glue; 10971 SmallVector<SDValue, 4> RetOps(1, Chain); 10972 10973 // Copy the result values into the output registers. 10974 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) { 10975 SDValue Val = OutVals[i]; 10976 CCValAssign &VA = RVLocs[i]; 10977 assert(VA.isRegLoc() && "Can only return in registers!"); 10978 10979 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) { 10980 // Handle returning f64 on RV32D with a soft float ABI. 
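      // For example, under an RV32 soft-float ABI an f64 return value is
      // split by SplitF64 below into two i32 halves that travel back in the
      // a0/a1 register pair.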
10981 assert(VA.isRegLoc() && "Expected return via registers"); 10982 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL, 10983 DAG.getVTList(MVT::i32, MVT::i32), Val); 10984 SDValue Lo = SplitF64.getValue(0); 10985 SDValue Hi = SplitF64.getValue(1); 10986 Register RegLo = VA.getLocReg(); 10987 assert(RegLo < RISCV::X31 && "Invalid register pair"); 10988 Register RegHi = RegLo + 1; 10989 10990 if (STI.isRegisterReservedByUser(RegLo) || 10991 STI.isRegisterReservedByUser(RegHi)) 10992 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 10993 MF.getFunction(), 10994 "Return value register required, but has been reserved."}); 10995 10996 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue); 10997 Glue = Chain.getValue(1); 10998 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32)); 10999 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue); 11000 Glue = Chain.getValue(1); 11001 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32)); 11002 } else { 11003 // Handle a 'normal' return. 11004 Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget); 11005 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue); 11006 11007 if (STI.isRegisterReservedByUser(VA.getLocReg())) 11008 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{ 11009 MF.getFunction(), 11010 "Return value register required, but has been reserved."}); 11011 11012 // Guarantee that all emitted copies are stuck together. 11013 Glue = Chain.getValue(1); 11014 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT())); 11015 } 11016 } 11017 11018 RetOps[0] = Chain; // Update chain. 11019 11020 // Add the glue node if we have it. 11021 if (Glue.getNode()) { 11022 RetOps.push_back(Glue); 11023 } 11024 11025 unsigned RetOpc = RISCVISD::RET_FLAG; 11026 // Interrupt service routines use different return instructions. 
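  // For example, a handler marked interrupt("machine") returns with mret,
  // "supervisor" with sret, and "user" with uret, as selected below.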
11027 const Function &Func = DAG.getMachineFunction().getFunction(); 11028 if (Func.hasFnAttribute("interrupt")) { 11029 if (!Func.getReturnType()->isVoidTy()) 11030 report_fatal_error( 11031 "Functions with the interrupt attribute must have void return type!"); 11032 11033 MachineFunction &MF = DAG.getMachineFunction(); 11034 StringRef Kind = 11035 MF.getFunction().getFnAttribute("interrupt").getValueAsString(); 11036 11037 if (Kind == "user") 11038 RetOpc = RISCVISD::URET_FLAG; 11039 else if (Kind == "supervisor") 11040 RetOpc = RISCVISD::SRET_FLAG; 11041 else 11042 RetOpc = RISCVISD::MRET_FLAG; 11043 } 11044 11045 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps); 11046 } 11047 11048 void RISCVTargetLowering::validateCCReservedRegs( 11049 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs, 11050 MachineFunction &MF) const { 11051 const Function &F = MF.getFunction(); 11052 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>(); 11053 11054 if (llvm::any_of(Regs, [&STI](auto Reg) { 11055 return STI.isRegisterReservedByUser(Reg.first); 11056 })) 11057 F.getContext().diagnose(DiagnosticInfoUnsupported{ 11058 F, "Argument register required, but has been reserved."}); 11059 } 11060 11061 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const { 11062 return CI->isTailCall(); 11063 } 11064 11065 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const { 11066 #define NODE_NAME_CASE(NODE) \ 11067 case RISCVISD::NODE: \ 11068 return "RISCVISD::" #NODE; 11069 // clang-format off 11070 switch ((RISCVISD::NodeType)Opcode) { 11071 case RISCVISD::FIRST_NUMBER: 11072 break; 11073 NODE_NAME_CASE(RET_FLAG) 11074 NODE_NAME_CASE(URET_FLAG) 11075 NODE_NAME_CASE(SRET_FLAG) 11076 NODE_NAME_CASE(MRET_FLAG) 11077 NODE_NAME_CASE(CALL) 11078 NODE_NAME_CASE(SELECT_CC) 11079 NODE_NAME_CASE(BR_CC) 11080 NODE_NAME_CASE(BuildPairF64) 11081 NODE_NAME_CASE(SplitF64) 11082 NODE_NAME_CASE(TAIL) 11083 NODE_NAME_CASE(MULHSU) 11084 NODE_NAME_CASE(SLLW) 11085 NODE_NAME_CASE(SRAW) 11086 NODE_NAME_CASE(SRLW) 11087 NODE_NAME_CASE(DIVW) 11088 NODE_NAME_CASE(DIVUW) 11089 NODE_NAME_CASE(REMUW) 11090 NODE_NAME_CASE(ROLW) 11091 NODE_NAME_CASE(RORW) 11092 NODE_NAME_CASE(CLZW) 11093 NODE_NAME_CASE(CTZW) 11094 NODE_NAME_CASE(FSLW) 11095 NODE_NAME_CASE(FSRW) 11096 NODE_NAME_CASE(FSL) 11097 NODE_NAME_CASE(FSR) 11098 NODE_NAME_CASE(FMV_H_X) 11099 NODE_NAME_CASE(FMV_X_ANYEXTH) 11100 NODE_NAME_CASE(FMV_X_SIGNEXTH) 11101 NODE_NAME_CASE(FMV_W_X_RV64) 11102 NODE_NAME_CASE(FMV_X_ANYEXTW_RV64) 11103 NODE_NAME_CASE(FCVT_X) 11104 NODE_NAME_CASE(FCVT_XU) 11105 NODE_NAME_CASE(FCVT_W_RV64) 11106 NODE_NAME_CASE(FCVT_WU_RV64) 11107 NODE_NAME_CASE(STRICT_FCVT_W_RV64) 11108 NODE_NAME_CASE(STRICT_FCVT_WU_RV64) 11109 NODE_NAME_CASE(READ_CYCLE_WIDE) 11110 NODE_NAME_CASE(GREV) 11111 NODE_NAME_CASE(GREVW) 11112 NODE_NAME_CASE(GORC) 11113 NODE_NAME_CASE(GORCW) 11114 NODE_NAME_CASE(SHFL) 11115 NODE_NAME_CASE(SHFLW) 11116 NODE_NAME_CASE(UNSHFL) 11117 NODE_NAME_CASE(UNSHFLW) 11118 NODE_NAME_CASE(BFP) 11119 NODE_NAME_CASE(BFPW) 11120 NODE_NAME_CASE(BCOMPRESS) 11121 NODE_NAME_CASE(BCOMPRESSW) 11122 NODE_NAME_CASE(BDECOMPRESS) 11123 NODE_NAME_CASE(BDECOMPRESSW) 11124 NODE_NAME_CASE(VMV_V_X_VL) 11125 NODE_NAME_CASE(VFMV_V_F_VL) 11126 NODE_NAME_CASE(VMV_X_S) 11127 NODE_NAME_CASE(VMV_S_X_VL) 11128 NODE_NAME_CASE(VFMV_S_F_VL) 11129 NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL) 11130 NODE_NAME_CASE(READ_VLENB) 11131 NODE_NAME_CASE(TRUNCATE_VECTOR_VL) 11132 NODE_NAME_CASE(VSLIDEUP_VL) 11133 
NODE_NAME_CASE(VSLIDE1UP_VL) 11134 NODE_NAME_CASE(VSLIDEDOWN_VL) 11135 NODE_NAME_CASE(VSLIDE1DOWN_VL) 11136 NODE_NAME_CASE(VID_VL) 11137 NODE_NAME_CASE(VFNCVT_ROD_VL) 11138 NODE_NAME_CASE(VECREDUCE_ADD_VL) 11139 NODE_NAME_CASE(VECREDUCE_UMAX_VL) 11140 NODE_NAME_CASE(VECREDUCE_SMAX_VL) 11141 NODE_NAME_CASE(VECREDUCE_UMIN_VL) 11142 NODE_NAME_CASE(VECREDUCE_SMIN_VL) 11143 NODE_NAME_CASE(VECREDUCE_AND_VL) 11144 NODE_NAME_CASE(VECREDUCE_OR_VL) 11145 NODE_NAME_CASE(VECREDUCE_XOR_VL) 11146 NODE_NAME_CASE(VECREDUCE_FADD_VL) 11147 NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL) 11148 NODE_NAME_CASE(VECREDUCE_FMIN_VL) 11149 NODE_NAME_CASE(VECREDUCE_FMAX_VL) 11150 NODE_NAME_CASE(ADD_VL) 11151 NODE_NAME_CASE(AND_VL) 11152 NODE_NAME_CASE(MUL_VL) 11153 NODE_NAME_CASE(OR_VL) 11154 NODE_NAME_CASE(SDIV_VL) 11155 NODE_NAME_CASE(SHL_VL) 11156 NODE_NAME_CASE(SREM_VL) 11157 NODE_NAME_CASE(SRA_VL) 11158 NODE_NAME_CASE(SRL_VL) 11159 NODE_NAME_CASE(SUB_VL) 11160 NODE_NAME_CASE(UDIV_VL) 11161 NODE_NAME_CASE(UREM_VL) 11162 NODE_NAME_CASE(XOR_VL) 11163 NODE_NAME_CASE(SADDSAT_VL) 11164 NODE_NAME_CASE(UADDSAT_VL) 11165 NODE_NAME_CASE(SSUBSAT_VL) 11166 NODE_NAME_CASE(USUBSAT_VL) 11167 NODE_NAME_CASE(FADD_VL) 11168 NODE_NAME_CASE(FSUB_VL) 11169 NODE_NAME_CASE(FMUL_VL) 11170 NODE_NAME_CASE(FDIV_VL) 11171 NODE_NAME_CASE(FNEG_VL) 11172 NODE_NAME_CASE(FABS_VL) 11173 NODE_NAME_CASE(FSQRT_VL) 11174 NODE_NAME_CASE(FMA_VL) 11175 NODE_NAME_CASE(FCOPYSIGN_VL) 11176 NODE_NAME_CASE(SMIN_VL) 11177 NODE_NAME_CASE(SMAX_VL) 11178 NODE_NAME_CASE(UMIN_VL) 11179 NODE_NAME_CASE(UMAX_VL) 11180 NODE_NAME_CASE(FMINNUM_VL) 11181 NODE_NAME_CASE(FMAXNUM_VL) 11182 NODE_NAME_CASE(MULHS_VL) 11183 NODE_NAME_CASE(MULHU_VL) 11184 NODE_NAME_CASE(FP_TO_SINT_VL) 11185 NODE_NAME_CASE(FP_TO_UINT_VL) 11186 NODE_NAME_CASE(SINT_TO_FP_VL) 11187 NODE_NAME_CASE(UINT_TO_FP_VL) 11188 NODE_NAME_CASE(FP_EXTEND_VL) 11189 NODE_NAME_CASE(FP_ROUND_VL) 11190 NODE_NAME_CASE(VWMUL_VL) 11191 NODE_NAME_CASE(VWMULU_VL) 11192 NODE_NAME_CASE(VWMULSU_VL) 11193 NODE_NAME_CASE(VWADD_VL) 11194 NODE_NAME_CASE(VWADDU_VL) 11195 NODE_NAME_CASE(VWSUB_VL) 11196 NODE_NAME_CASE(VWSUBU_VL) 11197 NODE_NAME_CASE(VWADD_W_VL) 11198 NODE_NAME_CASE(VWADDU_W_VL) 11199 NODE_NAME_CASE(VWSUB_W_VL) 11200 NODE_NAME_CASE(VWSUBU_W_VL) 11201 NODE_NAME_CASE(SETCC_VL) 11202 NODE_NAME_CASE(VSELECT_VL) 11203 NODE_NAME_CASE(VP_MERGE_VL) 11204 NODE_NAME_CASE(VMAND_VL) 11205 NODE_NAME_CASE(VMOR_VL) 11206 NODE_NAME_CASE(VMXOR_VL) 11207 NODE_NAME_CASE(VMCLR_VL) 11208 NODE_NAME_CASE(VMSET_VL) 11209 NODE_NAME_CASE(VRGATHER_VX_VL) 11210 NODE_NAME_CASE(VRGATHER_VV_VL) 11211 NODE_NAME_CASE(VRGATHEREI16_VV_VL) 11212 NODE_NAME_CASE(VSEXT_VL) 11213 NODE_NAME_CASE(VZEXT_VL) 11214 NODE_NAME_CASE(VCPOP_VL) 11215 NODE_NAME_CASE(READ_CSR) 11216 NODE_NAME_CASE(WRITE_CSR) 11217 NODE_NAME_CASE(SWAP_CSR) 11218 } 11219 // clang-format on 11220 return nullptr; 11221 #undef NODE_NAME_CASE 11222 } 11223 11224 /// getConstraintType - Given a constraint letter, return the type of 11225 /// constraint it is for this target. 
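/// For example, 'f' names a floating-point register class, 'I'/'J'/'K' are
/// immediate constraints (simm12, integer zero, and uimm5 respectively), 'A'
/// is a memory operand whose address is held in a general-purpose register,
/// and 'S' is a symbolic address.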
11226 RISCVTargetLowering::ConstraintType 11227 RISCVTargetLowering::getConstraintType(StringRef Constraint) const { 11228 if (Constraint.size() == 1) { 11229 switch (Constraint[0]) { 11230 default: 11231 break; 11232 case 'f': 11233 return C_RegisterClass; 11234 case 'I': 11235 case 'J': 11236 case 'K': 11237 return C_Immediate; 11238 case 'A': 11239 return C_Memory; 11240 case 'S': // A symbolic address 11241 return C_Other; 11242 } 11243 } else { 11244 if (Constraint == "vr" || Constraint == "vm") 11245 return C_RegisterClass; 11246 } 11247 return TargetLowering::getConstraintType(Constraint); 11248 } 11249 11250 std::pair<unsigned, const TargetRegisterClass *> 11251 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI, 11252 StringRef Constraint, 11253 MVT VT) const { 11254 // First, see if this is a constraint that directly corresponds to a 11255 // RISCV register class. 11256 if (Constraint.size() == 1) { 11257 switch (Constraint[0]) { 11258 case 'r': 11259 // TODO: Support fixed vectors up to XLen for P extension? 11260 if (VT.isVector()) 11261 break; 11262 return std::make_pair(0U, &RISCV::GPRRegClass); 11263 case 'f': 11264 if (Subtarget.hasStdExtZfh() && VT == MVT::f16) 11265 return std::make_pair(0U, &RISCV::FPR16RegClass); 11266 if (Subtarget.hasStdExtF() && VT == MVT::f32) 11267 return std::make_pair(0U, &RISCV::FPR32RegClass); 11268 if (Subtarget.hasStdExtD() && VT == MVT::f64) 11269 return std::make_pair(0U, &RISCV::FPR64RegClass); 11270 break; 11271 default: 11272 break; 11273 } 11274 } else if (Constraint == "vr") { 11275 for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass, 11276 &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) { 11277 if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) 11278 return std::make_pair(0U, RC); 11279 } 11280 } else if (Constraint == "vm") { 11281 if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy)) 11282 return std::make_pair(0U, &RISCV::VMV0RegClass); 11283 } 11284 11285 // Clang will correctly decode the usage of register name aliases into their 11286 // official names. However, other frontends like `rustc` do not. This allows 11287 // users of these frontends to use the ABI names for registers in LLVM-style 11288 // register constraints. 
11289 unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower()) 11290 .Case("{zero}", RISCV::X0) 11291 .Case("{ra}", RISCV::X1) 11292 .Case("{sp}", RISCV::X2) 11293 .Case("{gp}", RISCV::X3) 11294 .Case("{tp}", RISCV::X4) 11295 .Case("{t0}", RISCV::X5) 11296 .Case("{t1}", RISCV::X6) 11297 .Case("{t2}", RISCV::X7) 11298 .Cases("{s0}", "{fp}", RISCV::X8) 11299 .Case("{s1}", RISCV::X9) 11300 .Case("{a0}", RISCV::X10) 11301 .Case("{a1}", RISCV::X11) 11302 .Case("{a2}", RISCV::X12) 11303 .Case("{a3}", RISCV::X13) 11304 .Case("{a4}", RISCV::X14) 11305 .Case("{a5}", RISCV::X15) 11306 .Case("{a6}", RISCV::X16) 11307 .Case("{a7}", RISCV::X17) 11308 .Case("{s2}", RISCV::X18) 11309 .Case("{s3}", RISCV::X19) 11310 .Case("{s4}", RISCV::X20) 11311 .Case("{s5}", RISCV::X21) 11312 .Case("{s6}", RISCV::X22) 11313 .Case("{s7}", RISCV::X23) 11314 .Case("{s8}", RISCV::X24) 11315 .Case("{s9}", RISCV::X25) 11316 .Case("{s10}", RISCV::X26) 11317 .Case("{s11}", RISCV::X27) 11318 .Case("{t3}", RISCV::X28) 11319 .Case("{t4}", RISCV::X29) 11320 .Case("{t5}", RISCV::X30) 11321 .Case("{t6}", RISCV::X31) 11322 .Default(RISCV::NoRegister); 11323 if (XRegFromAlias != RISCV::NoRegister) 11324 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass); 11325 11326 // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the 11327 // TableGen record rather than the AsmName to choose registers for InlineAsm 11328 // constraints, plus we want to match those names to the widest floating point 11329 // register type available, manually select floating point registers here. 11330 // 11331 // The second case is the ABI name of the register, so that frontends can also 11332 // use the ABI names in register constraint lists. 11333 if (Subtarget.hasStdExtF()) { 11334 unsigned FReg = StringSwitch<unsigned>(Constraint.lower()) 11335 .Cases("{f0}", "{ft0}", RISCV::F0_F) 11336 .Cases("{f1}", "{ft1}", RISCV::F1_F) 11337 .Cases("{f2}", "{ft2}", RISCV::F2_F) 11338 .Cases("{f3}", "{ft3}", RISCV::F3_F) 11339 .Cases("{f4}", "{ft4}", RISCV::F4_F) 11340 .Cases("{f5}", "{ft5}", RISCV::F5_F) 11341 .Cases("{f6}", "{ft6}", RISCV::F6_F) 11342 .Cases("{f7}", "{ft7}", RISCV::F7_F) 11343 .Cases("{f8}", "{fs0}", RISCV::F8_F) 11344 .Cases("{f9}", "{fs1}", RISCV::F9_F) 11345 .Cases("{f10}", "{fa0}", RISCV::F10_F) 11346 .Cases("{f11}", "{fa1}", RISCV::F11_F) 11347 .Cases("{f12}", "{fa2}", RISCV::F12_F) 11348 .Cases("{f13}", "{fa3}", RISCV::F13_F) 11349 .Cases("{f14}", "{fa4}", RISCV::F14_F) 11350 .Cases("{f15}", "{fa5}", RISCV::F15_F) 11351 .Cases("{f16}", "{fa6}", RISCV::F16_F) 11352 .Cases("{f17}", "{fa7}", RISCV::F17_F) 11353 .Cases("{f18}", "{fs2}", RISCV::F18_F) 11354 .Cases("{f19}", "{fs3}", RISCV::F19_F) 11355 .Cases("{f20}", "{fs4}", RISCV::F20_F) 11356 .Cases("{f21}", "{fs5}", RISCV::F21_F) 11357 .Cases("{f22}", "{fs6}", RISCV::F22_F) 11358 .Cases("{f23}", "{fs7}", RISCV::F23_F) 11359 .Cases("{f24}", "{fs8}", RISCV::F24_F) 11360 .Cases("{f25}", "{fs9}", RISCV::F25_F) 11361 .Cases("{f26}", "{fs10}", RISCV::F26_F) 11362 .Cases("{f27}", "{fs11}", RISCV::F27_F) 11363 .Cases("{f28}", "{ft8}", RISCV::F28_F) 11364 .Cases("{f29}", "{ft9}", RISCV::F29_F) 11365 .Cases("{f30}", "{ft10}", RISCV::F30_F) 11366 .Cases("{f31}", "{ft11}", RISCV::F31_F) 11367 .Default(RISCV::NoRegister); 11368 if (FReg != RISCV::NoRegister) { 11369 assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg"); 11370 if (Subtarget.hasStdExtD() && (VT == MVT::f64 || VT == MVT::Other)) { 11371 unsigned RegNo = FReg - RISCV::F0_F; 11372 unsigned DReg = 
RISCV::F0_D + RegNo; 11373 return std::make_pair(DReg, &RISCV::FPR64RegClass); 11374 } 11375 if (VT == MVT::f32 || VT == MVT::Other) 11376 return std::make_pair(FReg, &RISCV::FPR32RegClass); 11377 if (Subtarget.hasStdExtZfh() && VT == MVT::f16) { 11378 unsigned RegNo = FReg - RISCV::F0_F; 11379 unsigned HReg = RISCV::F0_H + RegNo; 11380 return std::make_pair(HReg, &RISCV::FPR16RegClass); 11381 } 11382 } 11383 } 11384 11385 if (Subtarget.hasVInstructions()) { 11386 Register VReg = StringSwitch<Register>(Constraint.lower()) 11387 .Case("{v0}", RISCV::V0) 11388 .Case("{v1}", RISCV::V1) 11389 .Case("{v2}", RISCV::V2) 11390 .Case("{v3}", RISCV::V3) 11391 .Case("{v4}", RISCV::V4) 11392 .Case("{v5}", RISCV::V5) 11393 .Case("{v6}", RISCV::V6) 11394 .Case("{v7}", RISCV::V7) 11395 .Case("{v8}", RISCV::V8) 11396 .Case("{v9}", RISCV::V9) 11397 .Case("{v10}", RISCV::V10) 11398 .Case("{v11}", RISCV::V11) 11399 .Case("{v12}", RISCV::V12) 11400 .Case("{v13}", RISCV::V13) 11401 .Case("{v14}", RISCV::V14) 11402 .Case("{v15}", RISCV::V15) 11403 .Case("{v16}", RISCV::V16) 11404 .Case("{v17}", RISCV::V17) 11405 .Case("{v18}", RISCV::V18) 11406 .Case("{v19}", RISCV::V19) 11407 .Case("{v20}", RISCV::V20) 11408 .Case("{v21}", RISCV::V21) 11409 .Case("{v22}", RISCV::V22) 11410 .Case("{v23}", RISCV::V23) 11411 .Case("{v24}", RISCV::V24) 11412 .Case("{v25}", RISCV::V25) 11413 .Case("{v26}", RISCV::V26) 11414 .Case("{v27}", RISCV::V27) 11415 .Case("{v28}", RISCV::V28) 11416 .Case("{v29}", RISCV::V29) 11417 .Case("{v30}", RISCV::V30) 11418 .Case("{v31}", RISCV::V31) 11419 .Default(RISCV::NoRegister); 11420 if (VReg != RISCV::NoRegister) { 11421 if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy)) 11422 return std::make_pair(VReg, &RISCV::VMRegClass); 11423 if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy)) 11424 return std::make_pair(VReg, &RISCV::VRRegClass); 11425 for (const auto *RC : 11426 {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) { 11427 if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) { 11428 VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC); 11429 return std::make_pair(VReg, RC); 11430 } 11431 } 11432 } 11433 } 11434 11435 std::pair<Register, const TargetRegisterClass *> Res = 11436 TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT); 11437 11438 // If we picked one of the Zfinx register classes, remap it to the GPR class. 11439 // FIXME: When Zfinx is supported in CodeGen this will need to take the 11440 // Subtarget into account. 11441 if (Res.second == &RISCV::GPRF16RegClass || 11442 Res.second == &RISCV::GPRF32RegClass || 11443 Res.second == &RISCV::GPRF64RegClass) 11444 return std::make_pair(Res.first, &RISCV::GPRRegClass); 11445 11446 return Res; 11447 } 11448 11449 unsigned 11450 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const { 11451 // Currently only support length 1 constraints. 11452 if (ConstraintCode.size() == 1) { 11453 switch (ConstraintCode[0]) { 11454 case 'A': 11455 return InlineAsm::Constraint_A; 11456 default: 11457 break; 11458 } 11459 } 11460 11461 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode); 11462 } 11463 11464 void RISCVTargetLowering::LowerAsmOperandForConstraint( 11465 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops, 11466 SelectionDAG &DAG) const { 11467 // Currently only support length 1 constraints. 11468 if (Constraint.length() == 1) { 11469 switch (Constraint[0]) { 11470 case 'I': 11471 // Validate & create a 12-bit signed immediate operand. 
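      // For example (illustrative C), asm("addi %0, %1, %2" : "=r"(d)
      // : "r"(s), "I"(42)) is accepted, while a constant such as 4096 falls
      // outside the simm12 range [-2048, 2047] and is rejected here.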
11472 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 11473 uint64_t CVal = C->getSExtValue(); 11474 if (isInt<12>(CVal)) 11475 Ops.push_back( 11476 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 11477 } 11478 return; 11479 case 'J': 11480 // Validate & create an integer zero operand. 11481 if (auto *C = dyn_cast<ConstantSDNode>(Op)) 11482 if (C->getZExtValue() == 0) 11483 Ops.push_back( 11484 DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT())); 11485 return; 11486 case 'K': 11487 // Validate & create a 5-bit unsigned immediate operand. 11488 if (auto *C = dyn_cast<ConstantSDNode>(Op)) { 11489 uint64_t CVal = C->getZExtValue(); 11490 if (isUInt<5>(CVal)) 11491 Ops.push_back( 11492 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT())); 11493 } 11494 return; 11495 case 'S': 11496 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) { 11497 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op), 11498 GA->getValueType(0))); 11499 } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) { 11500 Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(), 11501 BA->getValueType(0))); 11502 } 11503 return; 11504 default: 11505 break; 11506 } 11507 } 11508 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG); 11509 } 11510 11511 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder, 11512 Instruction *Inst, 11513 AtomicOrdering Ord) const { 11514 if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent) 11515 return Builder.CreateFence(Ord); 11516 if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord)) 11517 return Builder.CreateFence(AtomicOrdering::Release); 11518 return nullptr; 11519 } 11520 11521 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder, 11522 Instruction *Inst, 11523 AtomicOrdering Ord) const { 11524 if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord)) 11525 return Builder.CreateFence(AtomicOrdering::Acquire); 11526 return nullptr; 11527 } 11528 11529 TargetLowering::AtomicExpansionKind 11530 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const { 11531 // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating 11532 // point operations can't be used in an lr/sc sequence without breaking the 11533 // forward-progress guarantee. 
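  // For example, `atomicrmw fadd float* %p, float 1.0 seq_cst` (illustrative
  // IR) is expanded to a load + fadd + cmpxchg retry loop instead of an LR/SC
  // loop, because only a restricted set of instructions may appear between
  // lr.w and sc.w if the sequence is to be guaranteed to make progress.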
11534 if (AI->isFloatingPointOperation()) 11535 return AtomicExpansionKind::CmpXChg; 11536 11537 unsigned Size = AI->getType()->getPrimitiveSizeInBits(); 11538 if (Size == 8 || Size == 16) 11539 return AtomicExpansionKind::MaskedIntrinsic; 11540 return AtomicExpansionKind::None; 11541 } 11542 11543 static Intrinsic::ID 11544 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) { 11545 if (XLen == 32) { 11546 switch (BinOp) { 11547 default: 11548 llvm_unreachable("Unexpected AtomicRMW BinOp"); 11549 case AtomicRMWInst::Xchg: 11550 return Intrinsic::riscv_masked_atomicrmw_xchg_i32; 11551 case AtomicRMWInst::Add: 11552 return Intrinsic::riscv_masked_atomicrmw_add_i32; 11553 case AtomicRMWInst::Sub: 11554 return Intrinsic::riscv_masked_atomicrmw_sub_i32; 11555 case AtomicRMWInst::Nand: 11556 return Intrinsic::riscv_masked_atomicrmw_nand_i32; 11557 case AtomicRMWInst::Max: 11558 return Intrinsic::riscv_masked_atomicrmw_max_i32; 11559 case AtomicRMWInst::Min: 11560 return Intrinsic::riscv_masked_atomicrmw_min_i32; 11561 case AtomicRMWInst::UMax: 11562 return Intrinsic::riscv_masked_atomicrmw_umax_i32; 11563 case AtomicRMWInst::UMin: 11564 return Intrinsic::riscv_masked_atomicrmw_umin_i32; 11565 } 11566 } 11567 11568 if (XLen == 64) { 11569 switch (BinOp) { 11570 default: 11571 llvm_unreachable("Unexpected AtomicRMW BinOp"); 11572 case AtomicRMWInst::Xchg: 11573 return Intrinsic::riscv_masked_atomicrmw_xchg_i64; 11574 case AtomicRMWInst::Add: 11575 return Intrinsic::riscv_masked_atomicrmw_add_i64; 11576 case AtomicRMWInst::Sub: 11577 return Intrinsic::riscv_masked_atomicrmw_sub_i64; 11578 case AtomicRMWInst::Nand: 11579 return Intrinsic::riscv_masked_atomicrmw_nand_i64; 11580 case AtomicRMWInst::Max: 11581 return Intrinsic::riscv_masked_atomicrmw_max_i64; 11582 case AtomicRMWInst::Min: 11583 return Intrinsic::riscv_masked_atomicrmw_min_i64; 11584 case AtomicRMWInst::UMax: 11585 return Intrinsic::riscv_masked_atomicrmw_umax_i64; 11586 case AtomicRMWInst::UMin: 11587 return Intrinsic::riscv_masked_atomicrmw_umin_i64; 11588 } 11589 } 11590 11591 llvm_unreachable("Unexpected XLen\n"); 11592 } 11593 11594 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic( 11595 IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr, 11596 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const { 11597 unsigned XLen = Subtarget.getXLen(); 11598 Value *Ordering = 11599 Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering())); 11600 Type *Tys[] = {AlignedAddr->getType()}; 11601 Function *LrwOpScwLoop = Intrinsic::getDeclaration( 11602 AI->getModule(), 11603 getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys); 11604 11605 if (XLen == 64) { 11606 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty()); 11607 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 11608 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty()); 11609 } 11610 11611 Value *Result; 11612 11613 // Must pass the shift amount needed to sign extend the loaded value prior 11614 // to performing a signed comparison for min/max. ShiftAmt is the number of 11615 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which 11616 // is the number of bits to left+right shift the value in order to 11617 // sign-extend. 
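  // For example (illustrative): an i8 atomic min on RV32 whose byte sits at
  // bit 16 has ShiftAmt = 16, so we pass 32 - 16 - 8 = 8; shifting the loaded
  // word left by 8 moves the field's sign bit to bit 31, and the matching
  // arithmetic right shift then sign-extends it for the comparison.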
11618 if (AI->getOperation() == AtomicRMWInst::Min || 11619 AI->getOperation() == AtomicRMWInst::Max) { 11620 const DataLayout &DL = AI->getModule()->getDataLayout(); 11621 unsigned ValWidth = 11622 DL.getTypeStoreSizeInBits(AI->getValOperand()->getType()); 11623 Value *SextShamt = 11624 Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt); 11625 Result = Builder.CreateCall(LrwOpScwLoop, 11626 {AlignedAddr, Incr, Mask, SextShamt, Ordering}); 11627 } else { 11628 Result = 11629 Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering}); 11630 } 11631 11632 if (XLen == 64) 11633 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 11634 return Result; 11635 } 11636 11637 TargetLowering::AtomicExpansionKind 11638 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR( 11639 AtomicCmpXchgInst *CI) const { 11640 unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits(); 11641 if (Size == 8 || Size == 16) 11642 return AtomicExpansionKind::MaskedIntrinsic; 11643 return AtomicExpansionKind::None; 11644 } 11645 11646 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic( 11647 IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr, 11648 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const { 11649 unsigned XLen = Subtarget.getXLen(); 11650 Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord)); 11651 Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32; 11652 if (XLen == 64) { 11653 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty()); 11654 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty()); 11655 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty()); 11656 CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64; 11657 } 11658 Type *Tys[] = {AlignedAddr->getType()}; 11659 Function *MaskedCmpXchg = 11660 Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys); 11661 Value *Result = Builder.CreateCall( 11662 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering}); 11663 if (XLen == 64) 11664 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty()); 11665 return Result; 11666 } 11667 11668 bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const { 11669 return false; 11670 } 11671 11672 bool RISCVTargetLowering::shouldConvertFpToSat(unsigned Op, EVT FPVT, 11673 EVT VT) const { 11674 if (!isOperationLegalOrCustom(Op, VT) || !FPVT.isSimple()) 11675 return false; 11676 11677 switch (FPVT.getSimpleVT().SimpleTy) { 11678 case MVT::f16: 11679 return Subtarget.hasStdExtZfh(); 11680 case MVT::f32: 11681 return Subtarget.hasStdExtF(); 11682 case MVT::f64: 11683 return Subtarget.hasStdExtD(); 11684 default: 11685 return false; 11686 } 11687 } 11688 11689 unsigned RISCVTargetLowering::getJumpTableEncoding() const { 11690 // If we are using the small code model, we can reduce size of jump table 11691 // entry to 4 bytes. 
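  // For example, on RV64 with CodeModel::Small and no PIC, all code is
  // expected to be linked within a 32-bit signed range, so EK_Custom32 lets
  // each jump table entry be a 4-byte value rather than an 8-byte pointer.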
  if (Subtarget.is64Bit() && !isPositionIndependent() &&
      getTargetMachine().getCodeModel() == CodeModel::Small) {
    return MachineJumpTableInfo::EK_Custom32;
  }
  return TargetLowering::getJumpTableEncoding();
}

const MCExpr *RISCVTargetLowering::LowerCustomJumpTableEntry(
    const MachineJumpTableInfo *MJTI, const MachineBasicBlock *MBB,
    unsigned uid, MCContext &Ctx) const {
  assert(Subtarget.is64Bit() && !isPositionIndependent() &&
         getTargetMachine().getCodeModel() == CodeModel::Small);
  return MCSymbolRefExpr::create(MBB->getSymbol(), Ctx);
}

bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    break;
  }

  return false;
}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10;
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value has f32 type under the LP64 ABI.
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}

bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
  if (Subtarget.is64Bit() && Type == MVT::i32)
    return true;

  return IsSigned;
}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
    if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      // Break the MUL into a SLLI and an ADD/SUB.
      const APInt &Imm = ConstNode->getAPIntValue();
      if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
          (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
        return true;
      // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
      if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
          ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
           (Imm - 8).isPowerOf2()))
        return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size is at least XLen.
      if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
        return false;
      // Break the MUL into two SLLI instructions and an ADD/SUB if Imm would
      // otherwise need a LUI/ADDI pair.
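      // For example (illustrative): 6144 = 3 << 11 is not simm12 and would
      // need a LUI/ADDI pair to materialize, but x * 6144 can be computed as
      // (x + (x << 1)) << 11, i.e. SLLI+ADD+SLLI.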
      if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
        APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
        if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
            (1 - ImmS).isPowerOf2())
          return true;
      }
    }
  }

  return false;
}

bool RISCVTargetLowering::isMulAddWithConstProfitable(SDValue AddNode,
                                                      SDValue ConstNode) const {
  // Let the DAGCombiner decide for vectors.
  EVT VT = AddNode.getValueType();
  if (VT.isVector())
    return true;

  // Let the DAGCombiner decide for larger types.
  if (VT.getScalarSizeInBits() > Subtarget.getXLen())
    return true;

  // It is worse if c1 is a simm12 while c1*c2 is not.
  ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
  ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
  const APInt &C1 = C1Node->getAPIntValue();
  const APInt &C2 = C2Node->getAPIntValue();
  if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
    return false;

  // Default to true and let the DAGCombiner decide.
  return true;
}

bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (!VT.isVector())
    return false;

  EVT ElemVT = VT.getVectorElementType();
  if (Alignment >= ElemVT.getStoreSize()) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}

bool RISCVTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad the high 16 bits with ones to
    // make a NaN-boxed float, and cast to f32.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
                      DAG.getConstant(0xFFFF0000, DL, MVT::i32));
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
    Parts[0] = Val;
    return true;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      assert(PartVTBitSize >= ValueVTBitSize);
      // If the element types differ, first widen the value to PartVT's size
      // while keeping its own element type, then bitcast to PartVT.
      // For example, to copy a <vscale x 1 x i8> value into
      // <vscale x 4 x i16>, we widen <vscale x 1 x i8> to <vscale x 8 x i8>
      // with an INSERT_SUBVECTOR, then bitcast to <vscale x 4 x i16>.
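      // In that example PartVTBitSize is 64 and ValueEltVT is i8, so Count
      // below is 64 / 8 = 8 and SameEltTypeVT is <vscale x 8 x i8>; the final
      // bitcast just reinterprets those 64 bits (per vscale) as i16 lanes.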
      if (ValueEltVT != PartEltVT) {
        if (PartVTBitSize > ValueVTBitSize) {
          unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
          assert(Count != 0 && "The number of elements should not be zero.");
          EVT SameEltTypeVT =
              EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
          Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SameEltTypeVT,
                            DAG.getUNDEF(SameEltTypeVT), Val,
                            DAG.getVectorIdxConstant(0, DL));
        }
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
      } else {
        Val =
            DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                        Val, DAG.getVectorIdxConstant(0, DL));
      }
      Parts[0] = Val;
      return true;
    }
  }
  return false;
}

SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    SDValue Val = Parts[0];

    // Cast the f32 to i32, truncate to i16, and cast back to f16.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
    return Val;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    SDValue Val = Parts[0];
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      assert(PartVTBitSize >= ValueVTBitSize);
      EVT SameEltTypeVT = ValueVT;
      // If the element types differ, first bitcast the part to a vector with
      // ValueVT's element type.
      // For example, to copy a <vscale x 1 x i8> value out of
      // <vscale x 4 x i16>, we bitcast <vscale x 4 x i16> to
      // <vscale x 8 x i8> first, then extract <vscale x 1 x i8>.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = PartVTBitSize / ValueEltVT.getFixedSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        SameEltTypeVT =
            EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
        Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
      }
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
                        DAG.getVectorIdxConstant(0, DL));
      return Val;
    }
  }
  return SDValue();
}

SDValue
RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                   SelectionDAG &DAG,
                                   SmallVectorImpl<SDNode *> &Created) const {
  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
  if (isIntDivCheap(N->getValueType(0), Attr))
    return SDValue(N, 0); // Lower SDIV as SDIV

  assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
         "Unexpected divisor!");

  // A conditional move is needed, so only do the transformation if Zbt is
  // enabled.
  if (!Subtarget.hasStdExtZbt())
    return SDValue();

  // When |Divisor| >= 2^12 the transformation isn't profitable, and dividing
  // by 2 would lengthen the critical path, so keep the original DAG in those
  // cases.
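  // The sequence built below computes, e.g. for X / 8,
  //   (sra (select (setlt X, 0), (add X, 7), X), 3)
  // i.e. (X + (X < 0 ? 7 : 0)) >> 3, which the Zbt conditional move can
  // select without a branch; a negative divisor negates the result at the
  // end.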
  unsigned Lg2 = Divisor.countTrailingZeros();
  if (Lg2 == 1 || Lg2 >= 12)
    return SDValue();

  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);

  // Add (N0 < 0) ? Pow2 - 1 : 0
  SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
  SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
  SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);

  Created.push_back(Cmp.getNode());
  Created.push_back(Add.getNode());
  Created.push_back(Sel.getNode());

  // Divide by pow2.
  SDValue SRA =
      DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));

  // If we're dividing by a positive value, we're done. Otherwise, we must
  // negate the result.
  if (Divisor.isNonNegative())
    return SRA;

  Created.push_back(SRA.getNode());
  return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}

namespace llvm {
namespace RISCVVIntrinsicsTable {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable

} // namespace llvm
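// A sketch of IR that reaches getRegisterByName above (hypothetical example;
// the named register must be reserved, e.g. via clang's -ffixed-x18, or the
// lookup reports a fatal error):
//
//   declare i64 @llvm.read_register.i64(metadata)
//   define i64 @read_s2() {
//     %v = call i64 @llvm.read_register.i64(metadata !0)
//     ret i64 %v
//   }
//   !0 = !{!"x18"}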