//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISCV target.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCVISelLowering.h"
#include "RISCVMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-isel"

namespace llvm {
namespace RISCV {
#define GET_RISCVVSSEGTable_IMPL
#define GET_RISCVVLSEGTable_IMPL
#define GET_RISCVVLXSEGTable_IMPL
#define GET_RISCVVSXSEGTable_IMPL
#define GET_RISCVVLETable_IMPL
#define GET_RISCVVSETable_IMPL
#define GET_RISCVVLXTable_IMPL
#define GET_RISCVVSXTable_IMPL
#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
} // namespace RISCV
} // namespace llvm

void RISCVDAGToDAGISel::PreprocessISelDAG() {
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty())
      continue;

    SDValue Result;
    switch (N->getOpcode()) {
    case ISD::SPLAT_VECTOR: {
      // Convert integer SPLAT_VECTOR to VMV_V_X_VL and floating-point
      // SPLAT_VECTOR to VFMV_V_F_VL to reduce isel burden.
      MVT VT = N->getSimpleValueType(0);
      unsigned Opc =
          VT.isInteger() ? RISCVISD::VMV_V_X_VL : RISCVISD::VFMV_V_F_VL;
      SDLoc DL(N);
      SDValue VL = CurDAG->getRegister(RISCV::X0, Subtarget->getXLenVT());
      Result = CurDAG->getNode(Opc, DL, VT, CurDAG->getUNDEF(VT),
                               N->getOperand(0), VL);
      break;
    }
    case RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL: {
      // Lower SPLAT_VECTOR_SPLIT_I64 to two scalar stores and a stride 0
      // vector load. Done after lowering and combining so that we have a
      // chance to optimize this to VMV_V_X_VL when the upper bits aren't
      // needed.
      assert(N->getNumOperands() == 4 && "Unexpected number of operands");
      MVT VT = N->getSimpleValueType(0);
      SDValue Passthru = N->getOperand(0);
      SDValue Lo = N->getOperand(1);
      SDValue Hi = N->getOperand(2);
      SDValue VL = N->getOperand(3);
      assert(VT.getVectorElementType() == MVT::i64 && VT.isScalableVector() &&
             Lo.getValueType() == MVT::i32 && Hi.getValueType() == MVT::i32 &&
             "Unexpected VTs!");
      MachineFunction &MF = CurDAG->getMachineFunction();
      RISCVMachineFunctionInfo *FuncInfo =
          MF.getInfo<RISCVMachineFunctionInfo>();
      SDLoc DL(N);

      // We use the same frame index we use for moving two i32s into 64-bit
      // FPR. This is an analogous operation.
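      // A rough sketch of the selected sequence (illustrative only):
      //   sw lo, 0(slot); sw hi, 4(slot); vlse64.v vd, (slot), zero
      // i.e. the two halves are spilled to the stack slot and reloaded as a
      // stride-0 vector load that broadcasts the reassembled 64-bit value.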
      int FI = FuncInfo->getMoveF64FrameIndex(MF);
      MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
      const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
      SDValue StackSlot =
          CurDAG->getFrameIndex(FI, TLI.getPointerTy(CurDAG->getDataLayout()));

      SDValue Chain = CurDAG->getEntryNode();
      Lo = CurDAG->getStore(Chain, DL, Lo, StackSlot, MPI, Align(8));

      SDValue OffsetSlot =
          CurDAG->getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), DL);
      Hi = CurDAG->getStore(Chain, DL, Hi, OffsetSlot, MPI.getWithOffset(4),
                            Align(8));

      Chain = CurDAG->getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi);

      SDVTList VTs = CurDAG->getVTList({VT, MVT::Other});
      SDValue IntID =
          CurDAG->getTargetConstant(Intrinsic::riscv_vlse, DL, MVT::i64);
      SDValue Ops[] = {Chain,
                       IntID,
                       Passthru,
                       StackSlot,
                       CurDAG->getRegister(RISCV::X0, MVT::i64),
                       VL};

      Result = CurDAG->getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs,
                                           Ops, MVT::i64, MPI, Align(8),
                                           MachineMemOperand::MOLoad);
      break;
    }
    }

    if (Result) {
      LLVM_DEBUG(dbgs() << "RISCV DAG preprocessing replacing:\nOld: ");
      LLVM_DEBUG(N->dump(CurDAG));
      LLVM_DEBUG(dbgs() << "\nNew: ");
      LLVM_DEBUG(Result->dump(CurDAG));
      LLVM_DEBUG(dbgs() << "\n");

      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
      MadeChange = true;
    }
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}

void RISCVDAGToDAGISel::PostprocessISelDAG() {
  HandleSDNode Dummy(CurDAG->getRoot());
  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    MadeChange |= doPeepholeSExtW(N);
    MadeChange |= doPeepholeMaskedRVV(N);
  }

  CurDAG->setRoot(Dummy.getValue());

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}

static SDNode *selectImmSeq(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                            RISCVMatInt::InstSeq &Seq) {
  SDNode *Result = nullptr;
  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, VT);
  for (RISCVMatInt::Inst &Inst : Seq) {
    SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, VT);
    switch (Inst.getOpndKind()) {
    case RISCVMatInt::Imm:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SDImm);
      break;
    case RISCVMatInt::RegX0:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg,
                                      CurDAG->getRegister(RISCV::X0, VT));
      break;
    case RISCVMatInt::RegReg:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg, SrcReg);
      break;
    case RISCVMatInt::RegImm:
      Result = CurDAG->getMachineNode(Inst.Opc, DL, VT, SrcReg, SDImm);
      break;
    }

    // Only the first instruction has X0 as its source.
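    // e.g. (illustrative) the InstSeq for 0x12345678 is LUI 0x12345 followed
    // by an ADDI 0x678 that reads the LUI result, so each step below feeds
    // the next one through SrcReg.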
    SrcReg = SDValue(Result, 0);
  }

  return Result;
}

static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, const MVT VT,
                         int64_t Imm, const RISCVSubtarget &Subtarget) {
  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(Imm, Subtarget.getFeatureBits());

  return selectImmSeq(CurDAG, DL, VT, Seq);
}

static SDValue createTuple(SelectionDAG &CurDAG, ArrayRef<SDValue> Regs,
                           unsigned NF, RISCVII::VLMUL LMUL) {
  static const unsigned M1TupleRegClassIDs[] = {
      RISCV::VRN2M1RegClassID, RISCV::VRN3M1RegClassID, RISCV::VRN4M1RegClassID,
      RISCV::VRN5M1RegClassID, RISCV::VRN6M1RegClassID, RISCV::VRN7M1RegClassID,
      RISCV::VRN8M1RegClassID};
  static const unsigned M2TupleRegClassIDs[] = {RISCV::VRN2M2RegClassID,
                                                RISCV::VRN3M2RegClassID,
                                                RISCV::VRN4M2RegClassID};

  assert(Regs.size() >= 2 && Regs.size() <= 8);

  unsigned RegClassID;
  unsigned SubReg0;
  switch (LMUL) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case RISCVII::VLMUL::LMUL_F8:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_1:
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm1_0;
    RegClassID = M1TupleRegClassIDs[NF - 2];
    break;
  case RISCVII::VLMUL::LMUL_2:
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm2_0;
    RegClassID = M2TupleRegClassIDs[NF - 2];
    break;
  case RISCVII::VLMUL::LMUL_4:
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    SubReg0 = RISCV::sub_vrm4_0;
    RegClassID = RISCV::VRN2M4RegClassID;
    break;
  }

  SDLoc DL(Regs[0]);
  SmallVector<SDValue, 8> Ops;

  Ops.push_back(CurDAG.getTargetConstant(RegClassID, DL, MVT::i32));

  for (unsigned I = 0; I < Regs.size(); ++I) {
    Ops.push_back(Regs[I]);
    Ops.push_back(CurDAG.getTargetConstant(SubReg0 + I, DL, MVT::i32));
  }
  SDNode *N =
      CurDAG.getMachineNode(TargetOpcode::REG_SEQUENCE, DL, MVT::Untyped, Ops);
  return SDValue(N, 0);
}

void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
    SDNode *Node, unsigned Log2SEW, const SDLoc &DL, unsigned CurOp,
    bool IsMasked, bool IsStridedOrIndexed, SmallVectorImpl<SDValue> &Operands,
    bool IsLoad, MVT *IndexVT) {
  SDValue Chain = Node->getOperand(0);
  SDValue Glue;

  Operands.push_back(Node->getOperand(CurOp++)); // Base pointer.

  if (IsStridedOrIndexed) {
    Operands.push_back(Node->getOperand(CurOp++)); // Index.
    if (IndexVT)
      *IndexVT = Operands.back()->getSimpleValueType(0);
  }

  if (IsMasked) {
    // Mask needs to be copied to V0.
    SDValue Mask = Node->getOperand(CurOp++);
    Chain = CurDAG->getCopyToReg(Chain, DL, RISCV::V0, Mask, SDValue());
    Glue = Chain.getValue(1);
    Operands.push_back(CurDAG->getRegister(RISCV::V0, Mask.getValueType()));
  }
  SDValue VL;
  selectVLOp(Node->getOperand(CurOp++), VL);
  Operands.push_back(VL);

  MVT XLenVT = Subtarget->getXLenVT();
  SDValue SEWOp = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);
  Operands.push_back(SEWOp);

  // Masked load has the tail policy argument.
  if (IsMasked && IsLoad) {
    // Policy must be a constant.
    uint64_t Policy = Node->getConstantOperandVal(CurOp++);
    SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
    Operands.push_back(PolicyOp);
  }

  Operands.push_back(Chain); // Chain.
  if (Glue)
    Operands.push_back(Glue);
}

static bool isAllUndef(ArrayRef<SDValue> Values) {
  return llvm::all_of(Values, [](SDValue V) { return V->isUndef(); });
}

void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue Merge = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(Merge);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands, /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, IsStrided, /*FF*/ false,
                            Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, bool IsMasked) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 2; // Do not count VL and Chain.
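  // e.g. (illustrative) a riscv_vlseg3ff node produces {data0, data1, data2,
  // VL, Chain}, so NF is 3.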
  MVT VT = Node->getSimpleValueType(0);
  MVT XLenVT = Subtarget->getXLenVT();
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 7> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ false, Operands,
                             /*IsLoad=*/true);

  const RISCV::VLSEGPseudo *P =
      RISCV::getVLSEGPseudo(NF, IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                            Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Load = CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped,
                                               XLenVT, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));     // VL
  ReplaceUses(SDValue(Node, NF + 1), SDValue(Load, 2)); // Chain
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumValues() - 1;
  MVT VT = Node->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);

  unsigned CurOp = 2;
  SmallVector<SDValue, 8> Operands;

  SmallVector<SDValue, 8> Regs(Node->op_begin() + CurOp,
                               Node->op_begin() + CurOp + NF);
  bool IsTU = IsMasked || !isAllUndef(Regs);
  if (IsTU) {
    SDValue MaskedOff = createTuple(*CurDAG, Regs, NF, LMUL);
    Operands.push_back(MaskedOff);
  }
  CurOp += NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/true, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
      NF, IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Load =
      CurDAG->getMachineNode(P->Pseudo, DL, MVT::Untyped, MVT::Other, Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

  SDValue SuperReg = SDValue(Load, 0);
  for (unsigned I = 0; I < NF; ++I) {
    unsigned SubRegIdx = RISCVTargetLowering::getSubregIndexByMVT(VT, I);
    ReplaceUses(SDValue(Node, I),
                CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, SuperReg));
  }

  ReplaceUses(SDValue(Node, NF), SDValue(Load, 1));
  CurDAG->RemoveDeadNode(Node);
}

void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, bool IsMasked,
                                    bool IsStrided) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 4;
  if (IsStrided)
    NF--;
  if (IsMasked)
    NF--;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                             Operands);

  const RISCV::VSSEGPseudo *P = RISCV::getVSSEGPseudo(
      NF, IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, bool IsMasked,
                                     bool IsOrdered) {
  SDLoc DL(Node);
  unsigned NF = Node->getNumOperands() - 5;
  if (IsMasked)
    --NF;
  MVT VT = Node->getOperand(2)->getSimpleValueType(0);
  unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
  SmallVector<SDValue, 8> Regs(Node->op_begin() + 2, Node->op_begin() + 2 + NF);
  SDValue StoreVal = createTuple(*CurDAG, Regs, NF, LMUL);

  SmallVector<SDValue, 8> Operands;
  Operands.push_back(StoreVal);
  unsigned CurOp = 2 + NF;

  MVT IndexVT;
  addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                             /*IsStridedOrIndexed*/ true, Operands,
                             /*IsLoad=*/false, &IndexVT);

  assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
         "Element count mismatch");

  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
  unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
    report_fatal_error("The V extension does not support EEW=64 for index "
                       "values when XLEN=32");
  }
  const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
      NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
      static_cast<unsigned>(IndexLMUL));
  MachineSDNode *Store =
      CurDAG->getMachineNode(P->Pseudo, DL, Node->getValueType(0), Operands);

  if (auto *MemOp = dyn_cast<MemSDNode>(Node))
    CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

  ReplaceNode(Node, Store);
}

void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
  if (!Subtarget->hasVInstructions())
    return;

  assert((Node->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
          Node->getOpcode() == ISD::INTRINSIC_WO_CHAIN) &&
         "Unexpected opcode");

  SDLoc DL(Node);
  MVT XLenVT = Subtarget->getXLenVT();

  bool HasChain = Node->getOpcode() == ISD::INTRINSIC_W_CHAIN;
  unsigned IntNoOffset = HasChain ? 1 : 0;
  unsigned IntNo = Node->getConstantOperandVal(IntNoOffset);

  assert((IntNo == Intrinsic::riscv_vsetvli ||
          IntNo == Intrinsic::riscv_vsetvlimax ||
          IntNo == Intrinsic::riscv_vsetvli_opt ||
          IntNo == Intrinsic::riscv_vsetvlimax_opt) &&
         "Unexpected vsetvli intrinsic");

  bool VLMax = IntNo == Intrinsic::riscv_vsetvlimax ||
               IntNo == Intrinsic::riscv_vsetvlimax_opt;
  unsigned Offset = IntNoOffset + (VLMax ? 1 : 2);

  assert(Node->getNumOperands() == Offset + 2 &&
         "Unexpected number of operands");

  unsigned SEW =
      RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
  RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
      Node->getConstantOperandVal(Offset + 1) & 0x7);

  unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
                                            /*MaskAgnostic*/ false);
  SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);

  SmallVector<EVT, 2> VTs = {XLenVT};
  if (HasChain)
    VTs.push_back(MVT::Other);

  SDValue VLOperand;
  unsigned Opcode = RISCV::PseudoVSETVLI;
  if (VLMax) {
    VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
    Opcode = RISCV::PseudoVSETVLIX0;
  } else {
    VLOperand = Node->getOperand(IntNoOffset + 1);

    if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
      uint64_t AVL = C->getZExtValue();
      if (isUInt<5>(AVL)) {
        SDValue VLImm = CurDAG->getTargetConstant(AVL, DL, XLenVT);
        SmallVector<SDValue, 3> Ops = {VLImm, VTypeIOp};
        if (HasChain)
          Ops.push_back(Node->getOperand(0));
        ReplaceNode(
            Node, CurDAG->getMachineNode(RISCV::PseudoVSETIVLI, DL, VTs, Ops));
        return;
      }
    }
  }

  SmallVector<SDValue, 3> Ops = {VLOperand, VTypeIOp};
  if (HasChain)
    Ops.push_back(Node->getOperand(0));

  ReplaceNode(Node, CurDAG->getMachineNode(Opcode, DL, VTs, Ops));
}

void RISCVDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we have already selected.
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  // Instruction Selection not handled by the auto-generated tablegen selection
  // should be handled here.
  unsigned Opcode = Node->getOpcode();
  MVT XLenVT = Subtarget->getXLenVT();
  SDLoc DL(Node);
  MVT VT = Node->getSimpleValueType(0);

  switch (Opcode) {
  case ISD::Constant: {
    auto *ConstNode = cast<ConstantSDNode>(Node);
    if (VT == XLenVT && ConstNode->isZero()) {
      SDValue New =
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
      ReplaceNode(Node, New.getNode());
      return;
    }
    int64_t Imm = ConstNode->getSExtValue();
    // If the upper XLen-16 bits are not used, try to convert this to a simm12
    // by sign extending bit 15.
    if (isUInt<16>(Imm) && isInt<12>(SignExtend64<16>(Imm)) &&
        hasAllHUsers(Node))
      Imm = SignExtend64<16>(Imm);
    // If the upper 32 bits are not used, try to convert this into a simm32 by
    // sign extending bit 31.
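    // e.g. (illustrative) Imm = 0xFFFFFFFF whose users all read only the low
    // 32 bits becomes -1, which materializes as a single ADDI.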
    if (!isInt<32>(Imm) && isUInt<32>(Imm) && hasAllWUsers(Node))
      Imm = SignExtend64<32>(Imm);

    ReplaceNode(Node, selectImm(CurDAG, DL, VT, Imm, *Subtarget));
    return;
  }
  case ISD::SHL: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
        !isa<ConstantSDNode>(N0.getOperand(1)))
      break;
    unsigned ShAmt = N1C->getZExtValue();
    uint64_t Mask = N0.getConstantOperandVal(1);

    // Optimize (shl (and X, C2), C) -> (slli (srliw X, C3), C3+C) where C2 has
    // 32 leading zeros and C3 trailing zeros.
    if (ShAmt <= 32 && isShiftedMask_64(Mask)) {
      unsigned XLen = Subtarget->getXLen();
      unsigned LeadingZeros = XLen - (64 - countLeadingZeros(Mask));
      unsigned TrailingZeros = countTrailingZeros(Mask);
      if (TrailingZeros > 0 && LeadingZeros == 32) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, VT, N0->getOperand(0),
            CurDAG->getTargetConstant(TrailingZeros, DL, VT));
        SDNode *SLLI = CurDAG->getMachineNode(
            RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
            CurDAG->getTargetConstant(TrailingZeros + ShAmt, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }
    break;
  }
  case ISD::SRL: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !N0.hasOneUse() ||
        !isa<ConstantSDNode>(N0.getOperand(1)))
      break;
    unsigned ShAmt = N1C->getZExtValue();
    uint64_t Mask = N0.getConstantOperandVal(1);

    // Optimize (srl (and X, C2), C) -> (slli (srliw X, C3), C3-C) where C2 has
    // 32 leading zeros and C3 trailing zeros.
    if (isShiftedMask_64(Mask)) {
      unsigned XLen = Subtarget->getXLen();
      unsigned LeadingZeros = XLen - (64 - countLeadingZeros(Mask));
      unsigned TrailingZeros = countTrailingZeros(Mask);
      if (LeadingZeros == 32 && TrailingZeros > ShAmt) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, VT, N0->getOperand(0),
            CurDAG->getTargetConstant(TrailingZeros, DL, VT));
        SDNode *SLLI = CurDAG->getMachineNode(
            RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
            CurDAG->getTargetConstant(TrailingZeros - ShAmt, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    // Optimize (srl (and X, C2), C) ->
    //          (srli (slli X, (XLen-C3)), (XLen-C3) + C)
    // where C2 is a mask with C3 trailing ones.
    // Take into account that C2 may have had lower bits unset by
    // SimplifyDemandedBits. This avoids materializing the C2 immediate.
    // This pattern occurs when type legalizing right shifts for types with
    // less than XLen bits.
    Mask |= maskTrailingOnes<uint64_t>(ShAmt);
    if (!isMask_64(Mask))
      break;
    unsigned TrailingOnes = countTrailingOnes(Mask);
    // 32 trailing ones should use srliw via tablegen pattern.
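    // e.g. (illustrative, XLen=64) (srl (and X, 0xFFFF), 4) becomes
    //   (srli (slli X, 48), 52), avoiding materialization of the 0xFFFF mask.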
    if (TrailingOnes == 32 || ShAmt >= TrailingOnes)
      break;
    unsigned LShAmt = Subtarget->getXLen() - TrailingOnes;
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
                               CurDAG->getTargetConstant(LShAmt, DL, VT));
    SDNode *SRLI = CurDAG->getMachineNode(
        RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
        CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
    ReplaceNode(Node, SRLI);
    return;
  }
  case ISD::SRA: {
    // Optimize (sra (sext_inreg X, i16), C) ->
    //          (srai (slli X, (XLen-16)), (XLen-16) + C)
    // And (sra (sext_inreg X, i8), C) ->
    //          (srai (slli X, (XLen-8)), (XLen-8) + C)
    // This can occur when Zbb is enabled, which makes sext_inreg i16/i8 legal.
    // This transform matches the code we get without Zbb. The shifts are more
    // compressible, and this can help expose CSE opportunities in the sdiv by
    // constant optimization.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::SIGN_EXTEND_INREG || !N0.hasOneUse())
      break;
    unsigned ShAmt = N1C->getZExtValue();
    unsigned ExtSize =
        cast<VTSDNode>(N0.getOperand(1))->getVT().getSizeInBits();
    // ExtSize of 32 should use sraiw via tablegen pattern.
    if (ExtSize >= 32 || ShAmt >= ExtSize)
      break;
    unsigned LShAmt = Subtarget->getXLen() - ExtSize;
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0->getOperand(0),
                               CurDAG->getTargetConstant(LShAmt, DL, VT));
    SDNode *SRAI = CurDAG->getMachineNode(
        RISCV::SRAI, DL, VT, SDValue(SLLI, 0),
        CurDAG->getTargetConstant(LShAmt + ShAmt, DL, VT));
    ReplaceNode(Node, SRAI);
    return;
  }
  case ISD::AND: {
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C)
      break;

    SDValue N0 = Node->getOperand(0);

    bool LeftShift = N0.getOpcode() == ISD::SHL;
    if (!LeftShift && N0.getOpcode() != ISD::SRL)
      break;

    auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
    if (!C)
      break;
    unsigned C2 = C->getZExtValue();
    unsigned XLen = Subtarget->getXLen();
    assert((C2 > 0 && C2 < XLen) && "Unexpected shift amount!");

    uint64_t C1 = N1C->getZExtValue();

    // Keep track of whether this is a c.andi. If we can't use c.andi, the
    // shift pair might offer more compression opportunities.
    // TODO: We could check for C extension here, but we don't have many lit
    // tests with the C extension enabled so not checking gets better coverage.
    // TODO: What if ANDI is faster than the shift?
    bool IsCANDI = isInt<6>(N1C->getSExtValue());

    // Clear irrelevant bits in the mask.
    if (LeftShift)
      C1 &= maskTrailingZeros<uint64_t>(C2);
    else
      C1 &= maskTrailingOnes<uint64_t>(XLen - C2);

    // Some transforms should only be done if the shift has a single use or
    // the AND would become (srli (slli X, 32), 32)
    bool OneUseOrZExtW = N0.hasOneUse() || C1 == UINT64_C(0xFFFFFFFF);

    SDValue X = N0.getOperand(0);

    // Turn (and (srl x, c2), c1) -> (srli (slli x, c3-c2), c3) if c1 is a mask
    // with c3 leading zeros.
    if (!LeftShift && isMask_64(C1)) {
      unsigned Leading = XLen - (64 - countLeadingZeros(C1));
      if (C2 < Leading) {
        // If the number of leading zeros is C2+32 this can be SRLIW.
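        // e.g. (illustrative, XLen=64) c2=4 and c1=0x0FFFFFFF give Leading=36,
        // so (and (srl X, 4), 0x0FFFFFFF) selects to a single srliw X, 4.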
        if (C2 + 32 == Leading) {
          SDNode *SRLIW = CurDAG->getMachineNode(
              RISCV::SRLIW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (and (srl (sexti32 Y), c2), c1) -> (srliw (sraiw Y, 31), c3 - 32) if
        // c1 is a mask with c3 leading zeros and c2 >= 32 and c3-c2==1.
        //
        // This pattern occurs when (i32 (srl (sra 31), c3 - 32)) is type
        // legalized and goes through DAG combine.
        if (C2 >= 32 && (Leading - C2) == 1 && N0.hasOneUse() &&
            X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
            cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32) {
          SDNode *SRAIW =
              CurDAG->getMachineNode(RISCV::SRAIW, DL, VT, X.getOperand(0),
                                     CurDAG->getTargetConstant(31, DL, VT));
          SDNode *SRLIW = CurDAG->getMachineNode(
              RISCV::SRLIW, DL, VT, SDValue(SRAIW, 0),
              CurDAG->getTargetConstant(Leading - 32, DL, VT));
          ReplaceNode(Node, SRLIW);
          return;
        }

        // (srli (slli x, c3-c2), c3).
        // Skip if we could use (zext.w (sraiw X, C2)).
        bool Skip = Subtarget->hasStdExtZba() && Leading == 32 &&
                    X.getOpcode() == ISD::SIGN_EXTEND_INREG &&
                    cast<VTSDNode>(X.getOperand(1))->getVT() == MVT::i32;
        // Also skip if we can use bexti.
        Skip |= Subtarget->hasStdExtZbs() && Leading == XLen - 1;
        if (OneUseOrZExtW && !Skip) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, VT, X,
              CurDAG->getTargetConstant(Leading - C2, DL, VT));
          SDNode *SRLI = CurDAG->getMachineNode(
              RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
              CurDAG->getTargetConstant(Leading, DL, VT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shl x, c2), c1) -> (srli (slli x, c2+c3), c3) if c1 is a mask
    // shifted by c2 bits with c3 leading zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      unsigned Leading = XLen - (64 - countLeadingZeros(C1));

      if (C2 + Leading < XLen &&
          C1 == (maskTrailingOnes<uint64_t>(XLen - (C2 + Leading)) << C2)) {
        // Use slli.uw when possible.
        if ((XLen - (C2 + Leading)) == 32 && Subtarget->hasStdExtZba()) {
          SDNode *SLLI_UW = CurDAG->getMachineNode(
              RISCV::SLLI_UW, DL, VT, X, CurDAG->getTargetConstant(C2, DL, VT));
          ReplaceNode(Node, SLLI_UW);
          return;
        }

        // (srli (slli x, c2+c3), c3)
        if (OneUseOrZExtW && !IsCANDI) {
          SDNode *SLLI = CurDAG->getMachineNode(
              RISCV::SLLI, DL, VT, X,
              CurDAG->getTargetConstant(C2 + Leading, DL, VT));
          SDNode *SRLI = CurDAG->getMachineNode(
              RISCV::SRLI, DL, VT, SDValue(SLLI, 0),
              CurDAG->getTargetConstant(Leading, DL, VT));
          ReplaceNode(Node, SRLI);
          return;
        }
      }
    }

    // Turn (and (shr x, c2), c1) -> (slli (srli x, c2+c3), c3) if c1 is a
    // shifted mask with c2 leading zeros and c3 trailing zeros.
    if (!LeftShift && isShiftedMask_64(C1)) {
      unsigned Leading = XLen - (64 - countLeadingZeros(C1));
      unsigned Trailing = countTrailingZeros(C1);
      if (Leading == C2 && C2 + Trailing < XLen && OneUseOrZExtW && !IsCANDI) {
        unsigned SrliOpc = RISCV::SRLI;
        // If the input is zexti32 we should use SRLIW.
        if (X.getOpcode() == ISD::AND && isa<ConstantSDNode>(X.getOperand(1)) &&
            X.getConstantOperandVal(1) == UINT64_C(0xFFFFFFFF)) {
          SrliOpc = RISCV::SRLIW;
          X = X.getOperand(0);
        }
        SDNode *SRLI = CurDAG->getMachineNode(
            SrliOpc, DL, VT, X,
            CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(Trailing, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If the leading zero count is C2+32, we can use SRLIW instead of SRLI.
      if (Leading > 32 && (Leading - 32) == C2 && C2 + Trailing < 32 &&
          OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, VT, X,
            CurDAG->getTargetConstant(C2 + Trailing, DL, VT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(Trailing, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    // Turn (and (shl x, c2), c1) -> (slli (srli x, c3-c2), c3) if c1 is a
    // shifted mask with no leading zeros and c3 trailing zeros.
    if (LeftShift && isShiftedMask_64(C1)) {
      unsigned Leading = XLen - (64 - countLeadingZeros(C1));
      unsigned Trailing = countTrailingZeros(C1);
      if (Leading == 0 && C2 < Trailing && OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLI = CurDAG->getMachineNode(
            RISCV::SRLI, DL, VT, X,
            CurDAG->getTargetConstant(Trailing - C2, DL, VT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLI, 0),
                                   CurDAG->getTargetConstant(Trailing, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
      // If we have (32-C2) leading zeros, we can use SRLIW instead of SRLI.
      if (C2 < Trailing && Leading + C2 == 32 && OneUseOrZExtW && !IsCANDI) {
        SDNode *SRLIW = CurDAG->getMachineNode(
            RISCV::SRLIW, DL, VT, X,
            CurDAG->getTargetConstant(Trailing - C2, DL, VT));
        SDNode *SLLI =
            CurDAG->getMachineNode(RISCV::SLLI, DL, VT, SDValue(SRLIW, 0),
                                   CurDAG->getTargetConstant(Trailing, DL, VT));
        ReplaceNode(Node, SLLI);
        return;
      }
    }

    break;
  }
  case ISD::MUL: {
    // Special case for calculating (mul (and X, C2), C1) where the full
    // product fits in XLen bits. We can shift X left by the number of leading
    // zeros in C2 and shift C1 left by XLen-lzcnt(C2). This will ensure the
    // final product has XLen trailing zeros, putting it in the output of
    // MULHU. This can avoid materializing a constant in a register for C2.

    // RHS should be a constant.
    auto *N1C = dyn_cast<ConstantSDNode>(Node->getOperand(1));
    if (!N1C || !N1C->hasOneUse())
      break;

    // LHS should be an AND with constant.
    SDValue N0 = Node->getOperand(0);
    if (N0.getOpcode() != ISD::AND || !isa<ConstantSDNode>(N0.getOperand(1)))
      break;

    uint64_t C2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();

    // Constant should be a mask.
    if (!isMask_64(C2))
      break;

    // If this can be an ANDI, ZEXT.H or ZEXT.W, don't do this if the ANDI/ZEXT
    // has multiple users or the constant is a simm12. This prevents inserting
    // a shift while still having uses of the AND/ZEXT. Shifting a simm12 will
    // likely make it more costly to materialize. Otherwise, using a SLLI
    // might allow it to be compressed.
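    // e.g. (illustrative of the rewrite below, XLen=64) (mul (and X, 0xFFFFFF),
    // 100) can become (mulhu (slli X, 40), 0x64000000): the 128-bit product
    // (X<<40)*(100<<24) leaves the desired value in its upper 64 bits.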
    bool IsANDIOrZExt =
        isInt<12>(C2) ||
        (C2 == UINT64_C(0xFFFF) &&
         (Subtarget->hasStdExtZbb() || Subtarget->hasStdExtZbp())) ||
        (C2 == UINT64_C(0xFFFFFFFF) && Subtarget->hasStdExtZba());
    if (IsANDIOrZExt && (isInt<12>(N1C->getSExtValue()) || !N0.hasOneUse()))
      break;

    // We need to shift left the AND input and C1 by a total of XLen bits.

    // How far left do we need to shift the AND input?
    unsigned XLen = Subtarget->getXLen();
    unsigned LeadingZeros = XLen - (64 - countLeadingZeros(C2));

    // The constant gets shifted by the remaining amount unless that would
    // shift bits out.
    uint64_t C1 = N1C->getZExtValue();
    unsigned ConstantShift = XLen - LeadingZeros;
    if (ConstantShift > (XLen - (64 - countLeadingZeros(C1))))
      break;

    uint64_t ShiftedC1 = C1 << ConstantShift;
    // If this is RV32, we need to sign extend the constant.
    if (XLen == 32)
      ShiftedC1 = SignExtend64<32>(ShiftedC1);

    // Create (mulhu (slli X, lzcnt(C2)), C1 << (XLen - lzcnt(C2))).
    SDNode *Imm = selectImm(CurDAG, DL, VT, ShiftedC1, *Subtarget);
    SDNode *SLLI =
        CurDAG->getMachineNode(RISCV::SLLI, DL, VT, N0.getOperand(0),
                               CurDAG->getTargetConstant(LeadingZeros, DL, VT));
    SDNode *MULHU = CurDAG->getMachineNode(RISCV::MULHU, DL, VT,
                                           SDValue(SLLI, 0), SDValue(Imm, 0));
    ReplaceNode(Node, MULHU);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntNo = Node->getConstantOperandVal(0);
    switch (IntNo) {
    // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vmsgeu:
    case Intrinsic::riscv_vmsge: {
      SDValue Src1 = Node->getOperand(1);
      SDValue Src2 = Node->getOperand(2);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
            break;
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMNANDOpcode, VMSetOpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_VMNAND_VMSET_OPCODES(lmulenum, suffix, suffix_b)            \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
    VMSetOpcode = RISCV::PseudoVMSET_M_##suffix_b;                             \
    break;
      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F8, MF8, B1)
      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F4, MF4, B2)
      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_F2, MF2, B4)
      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_1, M1, B8)
      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_2, M2, B16)
      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_4, M4, B32)
      CASE_VMSLT_VMNAND_VMSET_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_VMNAND_VMSET_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(3), VL);

      // If vmsgeu with 0 immediate, expand it to vmset.
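      // (An unsigned compare against 0 is always true, so vmsgeu.vx with a
      // zero immediate degenerates to vmset.m.)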
      if (IsCmpUnsignedZero) {
        ReplaceNode(Node, CurDAG->getMachineNode(VMSetOpcode, DL, VT, VL, SEW));
        return;
      }

      // Expand to
      //   vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
          0);
      ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
                                               {Cmp, Cmp, VL, SEW}));
      return;
    }
    case Intrinsic::riscv_vmsgeu_mask:
    case Intrinsic::riscv_vmsge_mask: {
      SDValue Src1 = Node->getOperand(2);
      SDValue Src2 = Node->getOperand(3);
      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
      bool IsCmpUnsignedZero = false;
      // Only custom select scalar second operand.
      if (Src2.getValueType() != XLenVT)
        break;
      // Small constants are handled with patterns.
      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
        int64_t CVal = C->getSExtValue();
        if (CVal >= -15 && CVal <= 16) {
          if (!IsUnsigned || CVal != 0)
            break;
          IsCmpUnsignedZero = true;
        }
      }
      MVT Src1VT = Src1.getSimpleValueType();
      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOpcode,
          VMOROpcode;
      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMSLT_OPCODES(lmulenum, suffix, suffix_b)                         \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                             : RISCV::PseudoVMSLT_VX_##suffix;                 \
    VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
                                 : RISCV::PseudoVMSLT_VX_##suffix##_MASK;      \
    break;
      CASE_VMSLT_OPCODES(LMUL_F8, MF8, B1)
      CASE_VMSLT_OPCODES(LMUL_F4, MF4, B2)
      CASE_VMSLT_OPCODES(LMUL_F2, MF2, B4)
      CASE_VMSLT_OPCODES(LMUL_1, M1, B8)
      CASE_VMSLT_OPCODES(LMUL_2, M2, B16)
      CASE_VMSLT_OPCODES(LMUL_4, M4, B32)
      CASE_VMSLT_OPCODES(LMUL_8, M8, B64)
#undef CASE_VMSLT_OPCODES
      }
      // Mask operations use the LMUL from the mask type.
      switch (RISCVTargetLowering::getLMUL(VT)) {
      default:
        llvm_unreachable("Unexpected LMUL!");
#define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
  case RISCVII::VLMUL::lmulenum:                                               \
    VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
    VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
    VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
    break;
      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F8, MF8)
      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F4, MF4)
      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_F2, MF2)
      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_1, M1)
      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_2, M2)
      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_4, M4)
      CASE_VMXOR_VMANDN_VMOR_OPCODES(LMUL_8, M8)
#undef CASE_VMXOR_VMANDN_VMOR_OPCODES
      }
      SDValue SEW = CurDAG->getTargetConstant(
          Log2_32(Src1VT.getScalarSizeInBits()), DL, XLenVT);
      SDValue MaskSEW = CurDAG->getTargetConstant(0, DL, XLenVT);
      SDValue VL;
      selectVLOp(Node->getOperand(5), VL);
      SDValue MaskedOff = Node->getOperand(1);
      SDValue Mask = Node->getOperand(4);

      // If vmsgeu_mask with 0 immediate, expand it to vmor mask, maskedoff.
      if (IsCmpUnsignedZero) {
        // We don't need vmor if the MaskedOff and the Mask are the same
        // value.
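        // (The vmor computes Mask | MaskedOff: active lanes always compare
        // true and inactive lanes keep MaskedOff. When both are the same
        // register the result is simply Mask, so no instruction is needed.)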
        if (Mask == MaskedOff) {
          ReplaceUses(Node, Mask.getNode());
          return;
        }
        ReplaceNode(Node,
                    CurDAG->getMachineNode(VMOROpcode, DL, VT,
                                           {Mask, MaskedOff, VL, MaskSEW}));
        return;
      }

      // If the MaskedOff value and the Mask are the same value use
      //   vmslt{u}.vx vt, va, x; vmandn.mm vd, vd, vt
      // This avoids needing to copy v0 to vd before starting the next
      // sequence.
      if (Mask == MaskedOff) {
        SDValue Cmp = SDValue(
            CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
            0);
        ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOpcode, DL, VT,
                                                 {Mask, Cmp, VL, MaskSEW}));
        return;
      }

      // Mask needs to be copied to V0.
      SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL,
                                           RISCV::V0, Mask, SDValue());
      SDValue Glue = Chain.getValue(1);
      SDValue V0 = CurDAG->getRegister(RISCV::V0, VT);

      // Otherwise use
      //   vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
      // The result is mask undisturbed.
      // We use the same instructions to emulate mask agnostic behavior,
      // because the agnostic result can be either undisturbed or all 1.
      SDValue Cmp = SDValue(
          CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
                                 {MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
          0);
      // vmxor.mm vd, vd, v0 is used to update the active values.
      ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
                                               {Cmp, Mask, VL, MaskSEW}));
      return;
    }
    case Intrinsic::riscv_vsetvli_opt:
    case Intrinsic::riscv_vsetvlimax_opt:
      return selectVSETVLI(Node);
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    // By default we do not custom select any intrinsic.
    default:
      break;
    case Intrinsic::riscv_vsetvli:
    case Intrinsic::riscv_vsetvlimax:
      return selectVSETVLI(Node);
    case Intrinsic::riscv_vlseg2:
    case Intrinsic::riscv_vlseg3:
    case Intrinsic::riscv_vlseg4:
    case Intrinsic::riscv_vlseg5:
    case Intrinsic::riscv_vlseg6:
    case Intrinsic::riscv_vlseg7:
    case Intrinsic::riscv_vlseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg2_mask:
    case Intrinsic::riscv_vlseg3_mask:
    case Intrinsic::riscv_vlseg4_mask:
    case Intrinsic::riscv_vlseg5_mask:
    case Intrinsic::riscv_vlseg6_mask:
    case Intrinsic::riscv_vlseg7_mask:
    case Intrinsic::riscv_vlseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vlsseg2:
    case Intrinsic::riscv_vlsseg3:
    case Intrinsic::riscv_vlsseg4:
    case Intrinsic::riscv_vlsseg5:
    case Intrinsic::riscv_vlsseg6:
    case Intrinsic::riscv_vlsseg7:
    case Intrinsic::riscv_vlsseg8: {
      selectVLSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vlsseg2_mask:
    case Intrinsic::riscv_vlsseg3_mask:
    case Intrinsic::riscv_vlsseg4_mask:
    case Intrinsic::riscv_vlsseg5_mask:
    case Intrinsic::riscv_vlsseg6_mask:
    case Intrinsic::riscv_vlsseg7_mask:
    case Intrinsic::riscv_vlsseg8_mask: {
      selectVLSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vloxseg2:
    case Intrinsic::riscv_vloxseg3:
    case Intrinsic::riscv_vloxseg4:
    case Intrinsic::riscv_vloxseg5:
    case Intrinsic::riscv_vloxseg6:
    case Intrinsic::riscv_vloxseg7:
    case Intrinsic::riscv_vloxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2:
    case Intrinsic::riscv_vluxseg3:
    case Intrinsic::riscv_vluxseg4:
    case Intrinsic::riscv_vluxseg5:
    case Intrinsic::riscv_vluxseg6:
    case Intrinsic::riscv_vluxseg7:
    case Intrinsic::riscv_vluxseg8:
      selectVLXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vloxseg2_mask:
    case Intrinsic::riscv_vloxseg3_mask:
    case Intrinsic::riscv_vloxseg4_mask:
    case Intrinsic::riscv_vloxseg5_mask:
    case Intrinsic::riscv_vloxseg6_mask:
    case Intrinsic::riscv_vloxseg7_mask:
    case Intrinsic::riscv_vloxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vluxseg2_mask:
    case Intrinsic::riscv_vluxseg3_mask:
    case Intrinsic::riscv_vluxseg4_mask:
    case Intrinsic::riscv_vluxseg5_mask:
    case Intrinsic::riscv_vluxseg6_mask:
    case Intrinsic::riscv_vluxseg7_mask:
    case Intrinsic::riscv_vluxseg8_mask:
      selectVLXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vlseg8ff:
    case Intrinsic::riscv_vlseg7ff:
    case Intrinsic::riscv_vlseg6ff:
    case Intrinsic::riscv_vlseg5ff:
    case Intrinsic::riscv_vlseg4ff:
    case Intrinsic::riscv_vlseg3ff:
    case Intrinsic::riscv_vlseg2ff: {
      selectVLSEGFF(Node, /*IsMasked*/ false);
      return;
    }
    case Intrinsic::riscv_vlseg8ff_mask:
    case Intrinsic::riscv_vlseg7ff_mask:
    case Intrinsic::riscv_vlseg6ff_mask:
    case Intrinsic::riscv_vlseg5ff_mask:
    case Intrinsic::riscv_vlseg4ff_mask:
    case Intrinsic::riscv_vlseg3ff_mask:
    case Intrinsic::riscv_vlseg2ff_mask: {
      selectVLSEGFF(Node, /*IsMasked*/ true);
      return;
    }
    case Intrinsic::riscv_vloxei:
    case Intrinsic::riscv_vloxei_mask:
    case Intrinsic::riscv_vluxei:
    case Intrinsic::riscv_vluxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vloxei_mask ||
                      IntNo == Intrinsic::riscv_vluxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vloxei ||
                       IntNo == Intrinsic::riscv_vloxei_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else
        // Skip the undef passthru operand for the nomask TA version pseudo.
        CurOp++;

      MVT IndexVT;
      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ true, Operands,
                                 /*IsLoad=*/true, &IndexVT);

      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
      unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
        report_fatal_error("The V extension does not support EEW=64 for index "
                           "values when XLEN=32");
      }
      const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
          IsMasked, IsTU, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
          static_cast<unsigned>(IndexLMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vlm:
    case Intrinsic::riscv_vle:
    case Intrinsic::riscv_vle_mask:
    case Intrinsic::riscv_vlse:
    case Intrinsic::riscv_vlse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vle_mask ||
                      IntNo == Intrinsic::riscv_vlse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vlse || IntNo == Intrinsic::riscv_vlse_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // The riscv_vlm intrinsic is always tail agnostic and has no passthru
      // operand.
      bool HasPassthruOperand = IntNo != Intrinsic::riscv_vlm;
      // Masked intrinsics only have TU version pseudo instructions.
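      // (e.g. an unmasked riscv_vle whose passthru operand is undef selects a
      // tail-agnostic pseudo and the passthru is dropped; otherwise the
      // passthru is kept as the first operand of a TU or masked pseudo.)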
      bool IsTU = HasPassthruOperand &&
                  (IsMasked || !Node->getOperand(CurOp).isUndef());
      SmallVector<SDValue, 8> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else if (HasPassthruOperand)
        // Skip the undef passthru operand for the nomask TA version pseudo.
        CurOp++;

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                 Operands, /*IsLoad=*/true);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsTU, IsStrided, /*FF*/ false, Log2SEW,
                              static_cast<unsigned>(LMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    case Intrinsic::riscv_vleff:
    case Intrinsic::riscv_vleff_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vleff_mask;

      MVT VT = Node->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      // Masked intrinsics only have TU version pseudo instructions.
      bool IsTU = IsMasked || !Node->getOperand(CurOp).isUndef();
      SmallVector<SDValue, 7> Operands;
      if (IsTU)
        Operands.push_back(Node->getOperand(CurOp++));
      else
        // Skip the undef passthru operand for the nomask TA version pseudo.
        CurOp++;

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ false, Operands,
                                 /*IsLoad=*/true);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, IsTU, /*Strided*/ false, /*FF*/ true,
                              Log2SEW, static_cast<unsigned>(LMUL));
      MachineSDNode *Load =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Load, {MemOp->getMemOperand()});

      ReplaceNode(Node, Load);
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
    case Intrinsic::riscv_vsseg2:
    case Intrinsic::riscv_vsseg3:
    case Intrinsic::riscv_vsseg4:
    case Intrinsic::riscv_vsseg5:
    case Intrinsic::riscv_vsseg6:
    case Intrinsic::riscv_vsseg7:
    case Intrinsic::riscv_vsseg8: {
      selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vsseg2_mask:
    case Intrinsic::riscv_vsseg3_mask:
    case Intrinsic::riscv_vsseg4_mask:
    case Intrinsic::riscv_vsseg5_mask:
    case Intrinsic::riscv_vsseg6_mask:
    case Intrinsic::riscv_vsseg7_mask:
    case Intrinsic::riscv_vsseg8_mask: {
      selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ false);
      return;
    }
    case Intrinsic::riscv_vssseg2:
    case Intrinsic::riscv_vssseg3:
    case Intrinsic::riscv_vssseg4:
    case Intrinsic::riscv_vssseg5:
    case Intrinsic::riscv_vssseg6:
    case Intrinsic::riscv_vssseg7:
    case Intrinsic::riscv_vssseg8: {
      selectVSSEG(Node, /*IsMasked*/ false, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vssseg2_mask:
    case Intrinsic::riscv_vssseg3_mask:
    case Intrinsic::riscv_vssseg4_mask:
    case Intrinsic::riscv_vssseg5_mask:
    case Intrinsic::riscv_vssseg6_mask:
    case Intrinsic::riscv_vssseg7_mask:
    case Intrinsic::riscv_vssseg8_mask: {
      selectVSSEG(Node, /*IsMasked*/ true, /*IsStrided*/ true);
      return;
    }
    case Intrinsic::riscv_vsoxseg2:
    case Intrinsic::riscv_vsoxseg3:
    case Intrinsic::riscv_vsoxseg4:
    case Intrinsic::riscv_vsoxseg5:
    case Intrinsic::riscv_vsoxseg6:
    case Intrinsic::riscv_vsoxseg7:
    case Intrinsic::riscv_vsoxseg8:
      selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vsuxseg2:
    case Intrinsic::riscv_vsuxseg3:
    case Intrinsic::riscv_vsuxseg4:
    case Intrinsic::riscv_vsuxseg5:
    case Intrinsic::riscv_vsuxseg6:
    case Intrinsic::riscv_vsuxseg7:
    case Intrinsic::riscv_vsuxseg8:
      selectVSXSEG(Node, /*IsMasked*/ false, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vsoxseg2_mask:
    case Intrinsic::riscv_vsoxseg3_mask:
    case Intrinsic::riscv_vsoxseg4_mask:
    case Intrinsic::riscv_vsoxseg5_mask:
    case Intrinsic::riscv_vsoxseg6_mask:
    case Intrinsic::riscv_vsoxseg7_mask:
    case Intrinsic::riscv_vsoxseg8_mask:
      selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ true);
      return;
    case Intrinsic::riscv_vsuxseg2_mask:
    case Intrinsic::riscv_vsuxseg3_mask:
    case Intrinsic::riscv_vsuxseg4_mask:
    case Intrinsic::riscv_vsuxseg5_mask:
    case Intrinsic::riscv_vsuxseg6_mask:
    case Intrinsic::riscv_vsuxseg7_mask:
    case Intrinsic::riscv_vsuxseg8_mask:
      selectVSXSEG(Node, /*IsMasked*/ true, /*IsOrdered*/ false);
      return;
    case Intrinsic::riscv_vsoxei:
    case Intrinsic::riscv_vsoxei_mask:
    case Intrinsic::riscv_vsuxei:
    case Intrinsic::riscv_vsuxei_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vsoxei_mask ||
                      IntNo == Intrinsic::riscv_vsuxei_mask;
      bool IsOrdered = IntNo == Intrinsic::riscv_vsoxei ||
                       IntNo == Intrinsic::riscv_vsoxei_mask;

      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      Operands.push_back(Node->getOperand(CurOp++)); // Store value.
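      // (Illustrative: for riscv_vsoxei the intrinsic operands are chain,
      // intrinsic id, store value, base pointer, index vector, [mask,] VL;
      // the helper below appends base, index, optional mask, VL and SEW.)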
      MVT IndexVT;
      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked,
                                 /*IsStridedOrIndexed*/ true, Operands,
                                 /*IsLoad=*/false, &IndexVT);

      assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
      unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
        report_fatal_error("The V extension does not support EEW=64 for index "
                           "values when XLEN=32");
      }
      const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
          IsMasked, /*TU*/ false, IsOrdered, IndexLog2EEW,
          static_cast<unsigned>(LMUL), static_cast<unsigned>(IndexLMUL));
      MachineSDNode *Store =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

      ReplaceNode(Node, Store);
      return;
    }
    case Intrinsic::riscv_vsm:
    case Intrinsic::riscv_vse:
    case Intrinsic::riscv_vse_mask:
    case Intrinsic::riscv_vsse:
    case Intrinsic::riscv_vsse_mask: {
      bool IsMasked = IntNo == Intrinsic::riscv_vse_mask ||
                      IntNo == Intrinsic::riscv_vsse_mask;
      bool IsStrided =
          IntNo == Intrinsic::riscv_vsse || IntNo == Intrinsic::riscv_vsse_mask;

      MVT VT = Node->getOperand(2)->getSimpleValueType(0);
      unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

      unsigned CurOp = 2;
      SmallVector<SDValue, 8> Operands;
      Operands.push_back(Node->getOperand(CurOp++)); // Store value.

      addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                 Operands);

      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
      const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
          IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
      MachineSDNode *Store =
          CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);
      if (auto *MemOp = dyn_cast<MemSDNode>(Node))
        CurDAG->setNodeMemRefs(Store, {MemOp->getMemOperand()});

      ReplaceNode(Node, Store);
      return;
    }
    }
    break;
  }
  case ISD::BITCAST: {
    MVT SrcVT = Node->getOperand(0).getSimpleValueType();
    // Just drop bitcasts between vectors if both are fixed or both are
    // scalable.
    if ((VT.isScalableVector() && SrcVT.isScalableVector()) ||
        (VT.isFixedLengthVector() && SrcVT.isFixedLengthVector())) {
      ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
      CurDAG->RemoveDeadNode(Node);
      return;
    }
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    SDValue V = Node->getOperand(0);
    SDValue SubV = Node->getOperand(1);
    SDLoc DL(SubV);
    auto Idx = Node->getConstantOperandVal(2);
    MVT SubVecVT = SubV.getSimpleValueType();

    const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
    MVT SubVecContainerVT = SubVecVT;
    // Establish the correct scalable-vector types for any fixed-length type.
  case ISD::INSERT_SUBVECTOR: {
    SDValue V = Node->getOperand(0);
    SDValue SubV = Node->getOperand(1);
    SDLoc DL(SubV);
    auto Idx = Node->getConstantOperandVal(2);
    MVT SubVecVT = SubV.getSimpleValueType();

    const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
    MVT SubVecContainerVT = SubVecVT;
    // Establish the correct scalable-vector types for any fixed-length type.
    if (SubVecVT.isFixedLengthVector())
      SubVecContainerVT = TLI.getContainerForFixedLengthVector(SubVecVT);
    if (VT.isFixedLengthVector())
      VT = TLI.getContainerForFixedLengthVector(VT);

    const auto *TRI = Subtarget->getRegisterInfo();
    unsigned SubRegIdx;
    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            VT, SubVecContainerVT, Idx, TRI);

    // If the Idx hasn't been completely eliminated then this is a subvector
    // insert which doesn't naturally align to a vector register. These must
    // be handled using instructions to manipulate the vector registers.
    if (Idx != 0)
      break;

    RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
    bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
                           SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
                           SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
    (void)IsSubVecPartReg; // Silence unused variable warning without asserts.
    assert((!IsSubVecPartReg || V.isUndef()) &&
           "Expecting lowering to have created legal INSERT_SUBVECTORs when "
           "the subvector is smaller than a full-sized register");

    // If we haven't set a SubRegIdx, then we must be going between
    // equally-sized LMUL groups (e.g. VR -> VR). This can be done as a copy.
    if (SubRegIdx == RISCV::NoSubRegister) {
      unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(VT);
      assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
                 InRegClassID &&
             "Unexpected subvector insertion");
      SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
      SDNode *NewNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                               DL, VT, SubV, RC);
      ReplaceNode(Node, NewNode);
      return;
    }

    SDValue Insert = CurDAG->getTargetInsertSubreg(SubRegIdx, DL, VT, V, SubV);
    ReplaceNode(Node, Insert.getNode());
    return;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    SDValue V = Node->getOperand(0);
    auto Idx = Node->getConstantOperandVal(1);
    MVT InVT = V.getSimpleValueType();
    SDLoc DL(V);

    const RISCVTargetLowering &TLI = *Subtarget->getTargetLowering();
    MVT SubVecContainerVT = VT;
    // Establish the correct scalable-vector types for any fixed-length type.
    if (VT.isFixedLengthVector())
      SubVecContainerVT = TLI.getContainerForFixedLengthVector(VT);
    if (InVT.isFixedLengthVector())
      InVT = TLI.getContainerForFixedLengthVector(InVT);

    const auto *TRI = Subtarget->getRegisterInfo();
    unsigned SubRegIdx;
    std::tie(SubRegIdx, Idx) =
        RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
            InVT, SubVecContainerVT, Idx, TRI);

    // If the Idx hasn't been completely eliminated then this is a subvector
    // extract which doesn't naturally align to a vector register. These must
    // be handled using instructions to manipulate the vector registers.
    if (Idx != 0)
      break;

    // If we haven't set a SubRegIdx, then we must be going between
    // equally-sized LMUL types (e.g. VR -> VR). This can be done as a copy.
    if (SubRegIdx == RISCV::NoSubRegister) {
      unsigned InRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(InVT);
      assert(RISCVTargetLowering::getRegClassIDForVecVT(SubVecContainerVT) ==
                 InRegClassID &&
             "Unexpected subvector extraction");
      SDValue RC = CurDAG->getTargetConstant(InRegClassID, DL, XLenVT);
      SDNode *NewNode =
          CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS, DL, VT, V, RC);
      ReplaceNode(Node, NewNode);
      return;
    }

    SDValue Extract = CurDAG->getTargetExtractSubreg(SubRegIdx, DL, VT, V);
    ReplaceNode(Node, Extract.getNode());
    return;
  }
  case ISD::SPLAT_VECTOR:
  case RISCVISD::VMV_S_X_VL:
  case RISCVISD::VFMV_S_F_VL:
  case RISCVISD::VMV_V_X_VL:
  case RISCVISD::VFMV_V_F_VL: {
    // Try to match splat of a scalar load to a strided load with stride of x0.
    bool IsScalarMove = Node->getOpcode() == RISCVISD::VMV_S_X_VL ||
                        Node->getOpcode() == RISCVISD::VFMV_S_F_VL;
    bool HasPassthruOperand = Node->getOpcode() != ISD::SPLAT_VECTOR;
    if (HasPassthruOperand && !Node->getOperand(0).isUndef())
      break;
    SDValue Src =
        HasPassthruOperand ? Node->getOperand(1) : Node->getOperand(0);
    auto *Ld = dyn_cast<LoadSDNode>(Src);
    if (!Ld)
      break;
    EVT MemVT = Ld->getMemoryVT();
    // The memory VT should be the same size as the element type.
    if (MemVT.getStoreSize() != VT.getVectorElementType().getStoreSize())
      break;
    if (!IsProfitableToFold(Src, Node, Node) ||
        !IsLegalToFold(Src, Node, Node, TM.getOptLevel()))
      break;

    SDValue VL;
    if (Node->getOpcode() == ISD::SPLAT_VECTOR)
      VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
    else if (IsScalarMove) {
      // We could deal with more VL if we update the VSETVLI insert pass to
      // avoid introducing more VSETVLI.
      if (!isOneConstant(Node->getOperand(2)))
        break;
      selectVLOp(Node->getOperand(2), VL);
    } else
      selectVLOp(Node->getOperand(2), VL);

    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());
    SDValue SEW = CurDAG->getTargetConstant(Log2SEW, DL, XLenVT);

    SDValue Operands[] = {Ld->getBasePtr(),
                          CurDAG->getRegister(RISCV::X0, XLenVT), VL, SEW,
                          Ld->getChain()};

    RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
    const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
        /*IsMasked*/ false, /*IsTU*/ false, /*IsStrided*/ true, /*FF*/ false,
        Log2SEW, static_cast<unsigned>(LMUL));
    MachineSDNode *Load =
        CurDAG->getMachineNode(P->Pseudo, DL, Node->getVTList(), Operands);

    CurDAG->setNodeMemRefs(Load, {Ld->getMemOperand()});

    ReplaceNode(Node, Load);
    return;
  }
  }

  // Select the default instruction.
  SelectCode(Node);
}

bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch (ConstraintID) {
  case InlineAsm::Constraint_m:
    // We just support simple memory operands that have a single address
    // operand and need no special handling.
    OutOps.push_back(Op);
    return false;
  case InlineAsm::Constraint_A:
    OutOps.push_back(Op);
    return false;
  default:
    break;
  }

  return true;
}

bool RISCVDAGToDAGISel::SelectAddrFrameIndex(SDValue Addr, SDValue &Base,
                                             SDValue &Offset) {
  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
    Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), Subtarget->getXLenVT());
    return true;
  }

  return false;
}

// Select a frame index and an optional immediate offset from an ADD or OR.
bool RISCVDAGToDAGISel::SelectFrameAddrRegImm(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) {
  if (SelectAddrFrameIndex(Addr, Base, Offset))
    return true;

  if (!CurDAG->isBaseWithConstantOffset(Addr))
    return false;

  if (auto *FIN = dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    if (isInt<12>(CVal)) {
      Base = CurDAG->getTargetFrameIndex(FIN->getIndex(),
                                         Subtarget->getXLenVT());
      Offset = CurDAG->getTargetConstant(CVal, SDLoc(Addr),
                                         Subtarget->getXLenVT());
      return true;
    }
  }

  return false;
}

// Fold constant addresses.
static bool selectConstantAddr(SelectionDAG *CurDAG, const SDLoc &DL,
                               const MVT VT, const RISCVSubtarget *Subtarget,
                               SDValue Addr, SDValue &Base, SDValue &Offset) {
  if (!isa<ConstantSDNode>(Addr))
    return false;

  int64_t CVal = cast<ConstantSDNode>(Addr)->getSExtValue();

  // If the constant is a simm12, we can fold the whole constant and use X0 as
  // the base. If the constant can be materialized with LUI+simm12, use LUI as
  // the base. We can't use generateInstSeq because it favors LUI+ADDIW.
  int64_t Lo12 = SignExtend64<12>(CVal);
  int64_t Hi = (uint64_t)CVal - (uint64_t)Lo12;
  if (!Subtarget->is64Bit() || isInt<32>(Hi)) {
    if (Hi) {
      int64_t Hi20 = (Hi >> 12) & 0xfffff;
      Base = SDValue(
          CurDAG->getMachineNode(RISCV::LUI, DL, VT,
                                 CurDAG->getTargetConstant(Hi20, DL, VT)),
          0);
    } else {
      Base = CurDAG->getRegister(RISCV::X0, VT);
    }
    Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
    return true;
  }

  // Ask how constant materialization would handle this constant.
  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(CVal, Subtarget->getFeatureBits());

  // If the last instruction would be an ADDI, we can fold its immediate and
  // emit the rest of the sequence as the base.
  if (Seq.back().Opc != RISCV::ADDI)
    return false;
  Lo12 = Seq.back().Imm;

  // Drop the last instruction.
  Seq.pop_back();
  assert(!Seq.empty() && "Expected more instructions in sequence");

  Base = SDValue(selectImmSeq(CurDAG, DL, VT, Seq), 0);
  Offset = CurDAG->getTargetConstant(Lo12, DL, VT);
  return true;
}

// Is this ADD instruction only used as the base pointer of scalar loads and
// stores?
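// (Added rationale, hedged: if the ADD had any other kind of user, that user
// would still need the fully materialized address, so folding the low 12 bits
// of the offset into the loads and stores would not save an instruction.)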
static bool isWorthFoldingAdd(SDValue Add) {
  for (auto Use : Add->uses()) {
    if (Use->getOpcode() != ISD::LOAD && Use->getOpcode() != ISD::STORE &&
        Use->getOpcode() != ISD::ATOMIC_LOAD &&
        Use->getOpcode() != ISD::ATOMIC_STORE)
      return false;
    EVT VT = cast<MemSDNode>(Use)->getMemoryVT();
    if (!VT.isScalarInteger() && VT != MVT::f16 && VT != MVT::f32 &&
        VT != MVT::f64)
      return false;
    // Don't allow stores of the value. It must be used as the address.
    if (Use->getOpcode() == ISD::STORE &&
        cast<StoreSDNode>(Use)->getValue() == Add)
      return false;
    if (Use->getOpcode() == ISD::ATOMIC_STORE &&
        cast<AtomicSDNode>(Use)->getVal() == Add)
      return false;
  }

  return true;
}

bool RISCVDAGToDAGISel::SelectAddrRegImm(SDValue Addr, SDValue &Base,
                                         SDValue &Offset) {
  if (SelectAddrFrameIndex(Addr, Base, Offset))
    return true;

  SDLoc DL(Addr);
  MVT VT = Addr.getSimpleValueType();

  if (Addr.getOpcode() == RISCVISD::ADD_LO) {
    Base = Addr.getOperand(0);
    Offset = Addr.getOperand(1);
    return true;
  }

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    if (isInt<12>(CVal)) {
      Base = Addr.getOperand(0);
      if (Base.getOpcode() == RISCVISD::ADD_LO) {
        SDValue LoOperand = Base.getOperand(1);
        if (auto *GA = dyn_cast<GlobalAddressSDNode>(LoOperand)) {
          // If the Lo in (ADD_LO hi, lo) is a global variable's address
          // (its low part, really), then we can rely on the alignment of
          // that variable to provide a margin of safety before the low part
          // can overflow the 12 bits of the load/store offset. Check if CVal
          // falls within that margin; if so (low part + CVal) can't overflow.
          const DataLayout &DL = CurDAG->getDataLayout();
          Align Alignment = commonAlignment(
              GA->getGlobal()->getPointerAlignment(DL), GA->getOffset());
          if (CVal == 0 || Alignment > CVal) {
            int64_t CombinedOffset = CVal + GA->getOffset();
            Base = Base.getOperand(0);
            Offset = CurDAG->getTargetGlobalAddress(
                GA->getGlobal(), SDLoc(LoOperand), LoOperand.getValueType(),
                CombinedOffset, GA->getTargetFlags());
            return true;
          }
        }
      }

      if (auto *FIN = dyn_cast<FrameIndexSDNode>(Base))
        Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), VT);
      Offset = CurDAG->getTargetConstant(CVal, DL, VT);
      return true;
    }
  }

  // Handle ADD with large immediates.
  if (Addr.getOpcode() == ISD::ADD &&
      isa<ConstantSDNode>(Addr.getOperand(1))) {
    int64_t CVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
    assert(!isInt<12>(CVal) && "simm12 not already handled?");

    // Handle immediates in the range [-4096,-2049] or [2048, 4094]. We can use
    // an ADDI for part of the offset and fold the rest into the load/store.
    // This mirrors the AddiPair PatFrag in RISCVInstrInfo.td.
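    // Illustrative example (not from the original source): CVal == 3000 gives
    // Adj == 2047, so we emit an ADDI of 2047 and fold the remaining 953 into
    // the load/store offset; both pieces are simm12.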
    if (isInt<12>(CVal / 2) && isInt<12>(CVal - CVal / 2)) {
      int64_t Adj = CVal < 0 ? -2048 : 2047;
      Base = SDValue(
          CurDAG->getMachineNode(RISCV::ADDI, DL, VT, Addr.getOperand(0),
                                 CurDAG->getTargetConstant(Adj, DL, VT)),
          0);
      Offset = CurDAG->getTargetConstant(CVal - Adj, DL, VT);
      return true;
    }

    // For larger immediates, we might be able to save one instruction from
    // constant materialization by folding the Lo12 bits of the immediate into
    // the address. We should only do this if the ADD is only used by loads and
    // stores that can fold the lo12 bits. Otherwise, the ADD will get selected
    // separately with the full materialized immediate creating extra
    // instructions.
    if (isWorthFoldingAdd(Addr) &&
        selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr.getOperand(1), Base,
                           Offset)) {
      // Insert an ADD instruction with the materialized Hi52 bits.
      Base = SDValue(
          CurDAG->getMachineNode(RISCV::ADD, DL, VT, Addr.getOperand(0), Base),
          0);
      return true;
    }
  }

  if (selectConstantAddr(CurDAG, DL, VT, Subtarget, Addr, Base, Offset))
    return true;

  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, DL, VT);
  return true;
}

bool RISCVDAGToDAGISel::selectShiftMask(SDValue N, unsigned ShiftWidth,
                                        SDValue &ShAmt) {
  // Shift instructions on RISCV only read the lower 5 or 6 bits of the shift
  // amount. If there is an AND on the shift amount, we can bypass it if it
  // doesn't affect any of those bits.
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
    const APInt &AndMask = N->getConstantOperandAPInt(1);

    // Since the max shift amount is a power of 2 we can subtract 1 to make a
    // mask that covers the bits needed to represent all shift amounts.
    assert(isPowerOf2_32(ShiftWidth) && "Unexpected max shift amount!");
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);

    if (ShMask.isSubsetOf(AndMask)) {
      ShAmt = N.getOperand(0);
      return true;
    }

    // SimplifyDemandedBits may have optimized the mask so try restoring any
    // bits that are known zero.
    KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0));
    if (ShMask.isSubsetOf(AndMask | Known.Zero)) {
      ShAmt = N.getOperand(0);
      return true;
    }
  } else if (N.getOpcode() == ISD::SUB &&
             isa<ConstantSDNode>(N.getOperand(0))) {
    uint64_t Imm = N.getConstantOperandVal(0);
    // If we are shifting by N-X where N == 0 mod Size, then just shift by -X
    // to generate a NEG instead of a SUB of a constant.
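    // Illustrative example (not from the original source): for a 32-bit
    // shift, (srl X, (sub 32, Y)) only needs the low 5 bits of (32 - Y),
    // which equal the low 5 bits of -Y, so a plain NEG (SUB from x0) of Y
    // suffices.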
    if (Imm != 0 && Imm % ShiftWidth == 0) {
      SDLoc DL(N);
      EVT VT = N.getValueType();
      SDValue Zero = CurDAG->getRegister(RISCV::X0, VT);
      unsigned NegOpc = VT == MVT::i64 ? RISCV::SUBW : RISCV::SUB;
      MachineSDNode *Neg = CurDAG->getMachineNode(NegOpc, DL, VT, Zero,
                                                  N.getOperand(1));
      ShAmt = SDValue(Neg, 0);
      return true;
    }
  }

  ShAmt = N;
  return true;
}

bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
  if (N.getOpcode() == ISD::SIGN_EXTEND_INREG &&
      cast<VTSDNode>(N.getOperand(1))->getVT() == MVT::i32) {
    Val = N.getOperand(0);
    return true;
  }
  MVT VT = N.getSimpleValueType();
  if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
    Val = N;
    return true;
  }

  return false;
}

bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
  if (N.getOpcode() == ISD::AND) {
    auto *C = dyn_cast<ConstantSDNode>(N.getOperand(1));
    if (C && C->getZExtValue() == UINT64_C(0xFFFFFFFF)) {
      Val = N.getOperand(0);
      return true;
    }
  }
  MVT VT = N.getSimpleValueType();
  APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
  if (CurDAG->MaskedValueIsZero(N, Mask)) {
    Val = N;
    return true;
  }

  return false;
}

/// Look for various patterns that can be done with a SHL that can be folded
/// into a SHXADD. \p ShAmt contains 1, 2, or 3 and is set based on which
/// SHXADD we are trying to match.
bool RISCVDAGToDAGISel::selectSHXADDOp(SDValue N, unsigned ShAmt,
                                       SDValue &Val) {
  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
    SDValue N0 = N.getOperand(0);

    bool LeftShift = N0.getOpcode() == ISD::SHL;
    if ((LeftShift || N0.getOpcode() == ISD::SRL) &&
        isa<ConstantSDNode>(N0.getOperand(1))) {
      uint64_t Mask = N.getConstantOperandVal(1);
      unsigned C2 = N0.getConstantOperandVal(1);

      unsigned XLen = Subtarget->getXLen();
      if (LeftShift)
        Mask &= maskTrailingZeros<uint64_t>(C2);
      else
        Mask &= maskTrailingOnes<uint64_t>(XLen - C2);

      // Look for (and (shl y, c2), c1) where c1 is a shifted mask with no
      // leading zeros and c3 trailing zeros. We can use an SRLI by c3-c2
      // followed by a SHXADD with c3 for the X amount.
      if (isShiftedMask_64(Mask)) {
        unsigned Leading = XLen - (64 - countLeadingZeros(Mask));
        unsigned Trailing = countTrailingZeros(Mask);
        if (LeftShift && Leading == 0 && C2 < Trailing && Trailing == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(CurDAG->getMachineNode(
                            RISCV::SRLI, DL, VT, N0.getOperand(0),
                            CurDAG->getTargetConstant(Trailing - C2, DL, VT)),
                        0);
          return true;
        }
        // Look for (and (shr y, c2), c1) where c1 is a shifted mask with c2
        // leading zeros and c3 trailing zeros. We can use an SRLI by c2+c3
        // followed by a SHXADD using c3 for the X amount.
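        // Illustrative example (not from the original source), assuming
        // XLen == 64 and ShAmt == 2: (and (srl y, 8), 0x00FFFFFFFFFFFFFC)
        // equals ((y >> 10) << 2), so we emit SRLI by 10 and let SH2ADD
        // supply the final shift by 2.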
        if (!LeftShift && Leading == C2 && Trailing == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(
              CurDAG->getMachineNode(
                  RISCV::SRLI, DL, VT, N0.getOperand(0),
                  CurDAG->getTargetConstant(Leading + Trailing, DL, VT)),
              0);
          return true;
        }
      }
    }
  }

  bool LeftShift = N.getOpcode() == ISD::SHL;
  if ((LeftShift || N.getOpcode() == ISD::SRL) &&
      isa<ConstantSDNode>(N.getOperand(1))) {
    SDValue N0 = N.getOperand(0);
    if (N0.getOpcode() == ISD::AND && N0.hasOneUse() &&
        isa<ConstantSDNode>(N0.getOperand(1))) {
      uint64_t Mask = N0.getConstantOperandVal(1);
      if (isShiftedMask_64(Mask)) {
        unsigned C1 = N.getConstantOperandVal(1);
        unsigned XLen = Subtarget->getXLen();
        unsigned Leading = XLen - (64 - countLeadingZeros(Mask));
        unsigned Trailing = countTrailingZeros(Mask);
        // Look for (shl (and X, Mask), C1) where Mask has 32 leading zeros and
        // C3 trailing zeros. If C1+C3==ShAmt we can use SRLIW+SHXADD.
        if (LeftShift && Leading == 32 && Trailing > 0 &&
            (Trailing + C1) == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(CurDAG->getMachineNode(
                            RISCV::SRLIW, DL, VT, N0.getOperand(0),
                            CurDAG->getTargetConstant(Trailing, DL, VT)),
                        0);
          return true;
        }
        // Look for (srl (and X, Mask), C1) where Mask has 32 leading zeros and
        // C3 trailing zeros. If C3-C1==ShAmt we can use SRLIW+SHXADD.
        if (!LeftShift && Leading == 32 && Trailing > C1 &&
            (Trailing - C1) == ShAmt) {
          SDLoc DL(N);
          EVT VT = N.getValueType();
          Val = SDValue(CurDAG->getMachineNode(
                            RISCV::SRLIW, DL, VT, N0.getOperand(0),
                            CurDAG->getTargetConstant(Trailing, DL, VT)),
                        0);
          return true;
        }
      }
    }
  }

  return false;
}

// Return true if all users of this SDNode* only consume the lower \p Bits.
// This can be used to form W instructions for add/sub/mul/shl even when the
// root isn't a sext_inreg. This can allow the ADDW/SUBW/MULW/SLLIW to CSE if
// SimplifyDemandedBits has made it so some users see a sext_inreg and some
// don't. The sext_inreg+add/sub/mul/shl will get selected, but still leave
// the add/sub/mul/shl to become non-W instructions. By checking the users we
// may be able to use a W instruction and CSE with the other instruction if
// this has happened. We could try to detect that the CSE opportunity exists
// before doing this, but that would be more complicated.
// TODO: Does this need to look through AND/OR/XOR to their users to find more
// opportunities?
bool RISCVDAGToDAGISel::hasAllNBitUsers(SDNode *Node, unsigned Bits) const {
  assert((Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::SUB ||
          Node->getOpcode() == ISD::MUL || Node->getOpcode() == ISD::SHL ||
          Node->getOpcode() == ISD::SRL ||
          Node->getOpcode() == ISD::SIGN_EXTEND_INREG ||
          Node->getOpcode() == RISCVISD::GREV ||
          Node->getOpcode() == RISCVISD::GORC ||
          isa<ConstantSDNode>(Node)) &&
         "Unexpected opcode");

  for (auto UI = Node->use_begin(), UE = Node->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    // Users of this node should have already been instruction selected.
    if (!User->isMachineOpcode())
      return false;

    // TODO: Add more opcodes?
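    // Added summary (hedged): each case below states how many low bits of the
    // operand the user actually reads, e.g. the *W ALU ops and the
    // FCVT-from-32-bit-integer forms read 32 bits, SEXT.B/SB read 8,
    // SEXT.H/SH/FMV.H.X read 16, and SLLI reads only XLen - shamt bits.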
    switch (User->getMachineOpcode()) {
    default:
      return false;
    case RISCV::ADDW:
    case RISCV::ADDIW:
    case RISCV::SUBW:
    case RISCV::MULW:
    case RISCV::SLLW:
    case RISCV::SLLIW:
    case RISCV::SRAW:
    case RISCV::SRAIW:
    case RISCV::SRLW:
    case RISCV::SRLIW:
    case RISCV::DIVW:
    case RISCV::DIVUW:
    case RISCV::REMW:
    case RISCV::REMUW:
    case RISCV::ROLW:
    case RISCV::RORW:
    case RISCV::RORIW:
    case RISCV::CLZW:
    case RISCV::CTZW:
    case RISCV::CPOPW:
    case RISCV::SLLI_UW:
    case RISCV::FMV_W_X:
    case RISCV::FCVT_H_W:
    case RISCV::FCVT_H_WU:
    case RISCV::FCVT_S_W:
    case RISCV::FCVT_S_WU:
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_D_WU:
      if (Bits < 32)
        return false;
      break;
    case RISCV::SLLI:
      // SLLI only uses the lower (XLen - ShAmt) bits.
      if (Bits < Subtarget->getXLen() - User->getConstantOperandVal(1))
        return false;
      break;
    case RISCV::ANDI:
      if (Bits < (64 - countLeadingZeros(User->getConstantOperandVal(1))))
        return false;
      break;
    case RISCV::SEXT_B:
      if (Bits < 8)
        return false;
      break;
    case RISCV::SEXT_H:
    case RISCV::FMV_H_X:
    case RISCV::ZEXT_H_RV32:
    case RISCV::ZEXT_H_RV64:
      if (Bits < 16)
        return false;
      break;
    case RISCV::ADD_UW:
    case RISCV::SH1ADD_UW:
    case RISCV::SH2ADD_UW:
    case RISCV::SH3ADD_UW:
      // The first operand to add.uw/shXadd.uw is implicitly zero extended from
      // 32 bits.
      if (UI.getOperandNo() != 0 || Bits < 32)
        return false;
      break;
    case RISCV::SB:
      if (UI.getOperandNo() != 0 || Bits < 8)
        return false;
      break;
    case RISCV::SH:
      if (UI.getOperandNo() != 0 || Bits < 16)
        return false;
      break;
    case RISCV::SW:
      if (UI.getOperandNo() != 0 || Bits < 32)
        return false;
      break;
    }
  }

  return true;
}

// Select VL as a 5-bit immediate or a value that will become a register. This
// allows us to choose between VSETIVLI or VSETVLI later.
bool RISCVDAGToDAGISel::selectVLOp(SDValue N, SDValue &VL) {
  auto *C = dyn_cast<ConstantSDNode>(N);
  if (C && isUInt<5>(C->getZExtValue())) {
    VL = CurDAG->getTargetConstant(C->getZExtValue(), SDLoc(N),
                                   N->getValueType(0));
  } else if (C && C->isAllOnesValue()) {
    // Treat all ones as VLMax.
    VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
                                   N->getValueType(0));
  } else if (isa<RegisterSDNode>(N) &&
             cast<RegisterSDNode>(N)->getReg() == RISCV::X0) {
    // All our VL operands use an operand that allows GPRNoX0 or an immediate
    // as the register class. Convert X0 to a special immediate to pass the
    // MachineVerifier. This is recognized specially by the vsetvli insertion
    // pass.
    VL = CurDAG->getTargetConstant(RISCV::VLMaxSentinel, SDLoc(N),
                                   N->getValueType(0));
  } else {
    VL = N;
  }

  return true;
}

bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef())
    return false;
  SplatVal = N.getOperand(1);
  return true;
}

using ValidateFn = bool (*)(int64_t);

static bool selectVSplatSimmHelper(SDValue N, SDValue &SplatVal,
                                   SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget,
                                   ValidateFn ValidateImm) {
  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
      !isa<ConstantSDNode>(N.getOperand(1)))
    return false;

  int64_t SplatImm =
      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();

  // The semantics of RISCVISD::VMV_V_X_VL are that when the operand type is
  // wider than the resulting vector element type, an implicit truncation
  // first takes place. Therefore, perform a manual truncation/sign-extension
  // in order to ignore any truncated bits and catch any zero-extended
  // immediate.
  // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
  // sign-extending to (XLenVT -1).
  MVT XLenVT = Subtarget.getXLenVT();
  assert(XLenVT == N.getOperand(1).getSimpleValueType() &&
         "Unexpected splat operand type");
  MVT EltVT = N.getSimpleValueType().getVectorElementType();
  if (EltVT.bitsLT(XLenVT))
    SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());

  if (!ValidateImm(SplatImm))
    return false;

  SplatVal = DAG.getTargetConstant(SplatImm, SDLoc(N), XLenVT);
  return true;
}

bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
  return selectVSplatSimmHelper(N, SplatVal, *CurDAG, *Subtarget,
                                [](int64_t Imm) { return isInt<5>(Imm); });
}

bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1(SDValue N, SDValue &SplatVal) {
  return selectVSplatSimmHelper(
      N, SplatVal, *CurDAG, *Subtarget,
      [](int64_t Imm) { return (isInt<5>(Imm) && Imm != -16) || Imm == 16; });
}

bool RISCVDAGToDAGISel::selectVSplatSimm5Plus1NonZero(SDValue N,
                                                      SDValue &SplatVal) {
  return selectVSplatSimmHelper(
      N, SplatVal, *CurDAG, *Subtarget, [](int64_t Imm) {
        return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);
      });
}

bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
  if (N.getOpcode() != RISCVISD::VMV_V_X_VL || !N.getOperand(0).isUndef() ||
      !isa<ConstantSDNode>(N.getOperand(1)))
    return false;

  int64_t SplatImm =
      cast<ConstantSDNode>(N.getOperand(1))->getSExtValue();

  if (!isUInt<5>(SplatImm))
    return false;

  SplatVal =
      CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());

  return true;
}

bool RISCVDAGToDAGISel::selectRVVSimm5(SDValue N, unsigned Width,
                                       SDValue &Imm) {
  if (auto *C = dyn_cast<ConstantSDNode>(N)) {
    int64_t ImmVal = SignExtend64(C->getSExtValue(), Width);

    if (!isInt<5>(ImmVal))
      return false;

    Imm = CurDAG->getTargetConstant(ImmVal, SDLoc(N), Subtarget->getXLenVT());
    return true;
  }

  return false;
}

// Try to remove sext.w if the input is a W instruction or can be made into
// a W instruction cheaply.
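// Illustrative example (not from the original source):
//   add    a0, a1, a2
//   sext.w a0, a0          (selected as ADDIW a0, a0, 0)
// The ADD can be converted to ADDW, whose result is already sign-extended
// from 32 bits, so the ADDIW is redundant and its uses are replaced.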
bool RISCVDAGToDAGISel::doPeepholeSExtW(SDNode *N) {
  // Look for the sext.w pattern, addiw rd, rs1, 0.
  if (N->getMachineOpcode() != RISCV::ADDIW ||
      !isNullConstant(N->getOperand(1)))
    return false;

  SDValue N0 = N->getOperand(0);
  if (!N0.isMachineOpcode())
    return false;

  switch (N0.getMachineOpcode()) {
  default:
    break;
  case RISCV::ADD:
  case RISCV::ADDI:
  case RISCV::SUB:
  case RISCV::MUL:
  case RISCV::SLLI: {
    // Convert sext.w+add/sub/mul to their W instructions. This will create
    // a new independent instruction. This improves latency.
    unsigned Opc;
    switch (N0.getMachineOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode!");
    case RISCV::ADD: Opc = RISCV::ADDW; break;
    case RISCV::ADDI: Opc = RISCV::ADDIW; break;
    case RISCV::SUB: Opc = RISCV::SUBW; break;
    case RISCV::MUL: Opc = RISCV::MULW; break;
    case RISCV::SLLI: Opc = RISCV::SLLIW; break;
    }

    SDValue N00 = N0.getOperand(0);
    SDValue N01 = N0.getOperand(1);

    // Shift amount needs to be uimm5.
    if (N0.getMachineOpcode() == RISCV::SLLI &&
        !isUInt<5>(cast<ConstantSDNode>(N01)->getSExtValue()))
      break;

    SDNode *Result =
        CurDAG->getMachineNode(Opc, SDLoc(N), N->getValueType(0),
                               N00, N01);
    ReplaceUses(N, Result);
    return true;
  }
  case RISCV::ADDW:
  case RISCV::ADDIW:
  case RISCV::SUBW:
  case RISCV::MULW:
  case RISCV::SLLIW:
  case RISCV::GREVIW:
  case RISCV::GORCIW:
    // Result is already sign extended; just remove the sext.w.
    // NOTE: We only handle the nodes that are selected with hasAllWUsers.
    ReplaceUses(N, N0.getNode());
    return true;
  }

  return false;
}

// Optimize masked RVV pseudo instructions with a known all-ones mask to their
// corresponding "unmasked" pseudo versions. The mask we're interested in will
// take the form of a V0 physical register operand, with a glued
// register-setting instruction.
bool RISCVDAGToDAGISel::doPeepholeMaskedRVV(SDNode *N) {
  const RISCV::RISCVMaskedPseudoInfo *I =
      RISCV::getMaskedPseudoInfo(N->getMachineOpcode());
  if (!I)
    return false;

  unsigned MaskOpIdx = I->MaskOpIdx;

  // Check that we're using V0 as a mask register.
  if (!isa<RegisterSDNode>(N->getOperand(MaskOpIdx)) ||
      cast<RegisterSDNode>(N->getOperand(MaskOpIdx))->getReg() != RISCV::V0)
    return false;

  // The glued user defines V0.
  const auto *Glued = N->getGluedNode();

  if (!Glued || Glued->getOpcode() != ISD::CopyToReg)
    return false;

  // Check that we're defining V0 as a mask register.
  if (!isa<RegisterSDNode>(Glued->getOperand(1)) ||
      cast<RegisterSDNode>(Glued->getOperand(1))->getReg() != RISCV::V0)
    return false;

  // Check the instruction defining V0; it needs to be a VMSET pseudo.
  SDValue MaskSetter = Glued->getOperand(2);

  const auto IsVMSet = [](unsigned Opc) {
    return Opc == RISCV::PseudoVMSET_M_B1 || Opc == RISCV::PseudoVMSET_M_B16 ||
           Opc == RISCV::PseudoVMSET_M_B2 || Opc == RISCV::PseudoVMSET_M_B32 ||
           Opc == RISCV::PseudoVMSET_M_B4 || Opc == RISCV::PseudoVMSET_M_B64 ||
           Opc == RISCV::PseudoVMSET_M_B8;
  };

  // TODO: Check that the VMSET is the expected bitwidth? The pseudo has
  // undefined behaviour if it's the wrong bitwidth, so we could choose to
  // assume that it's all-ones? Same applies to its VL.
  if (!MaskSetter->isMachineOpcode() || !IsVMSet(MaskSetter.getMachineOpcode()))
    return false;

  // Retrieve the tail policy operand index, if any.
  Optional<unsigned> TailPolicyOpIdx;
  const RISCVInstrInfo &TII = *Subtarget->getInstrInfo();
  const MCInstrDesc &MaskedMCID = TII.get(N->getMachineOpcode());

  bool IsTA = true;
  if (RISCVII::hasVecPolicyOp(MaskedMCID.TSFlags)) {
    // The last operand of the pseudo is the policy op, but we might have a
    // Glue operand last. We might also have a chain.
    TailPolicyOpIdx = N->getNumOperands() - 1;
    if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Glue)
      (*TailPolicyOpIdx)--;
    if (N->getOperand(*TailPolicyOpIdx).getValueType() == MVT::Other)
      (*TailPolicyOpIdx)--;

    if (!(N->getConstantOperandVal(*TailPolicyOpIdx) &
          RISCVII::TAIL_AGNOSTIC)) {
      // Keep the true-masked instruction when there is no unmasked TU
      // instruction.
      if (I->UnmaskedTUPseudo == I->MaskedPseudo && !N->getOperand(0).isUndef())
        return false;
      // We can't use TA if the tie-operand is not IMPLICIT_DEF.
      if (!N->getOperand(0).isUndef())
        IsTA = false;
    }
  }

  unsigned Opc = IsTA ? I->UnmaskedPseudo : I->UnmaskedTUPseudo;

  // Check that we're dropping the mask operand and any policy operand
  // when we transform to this unmasked pseudo. Additionally, if this
  // instruction is tail agnostic, the unmasked instruction should not have a
  // merge op.
  uint64_t TSFlags = TII.get(Opc).TSFlags;
  assert((IsTA != RISCVII::hasMergeOp(TSFlags)) &&
         RISCVII::hasDummyMaskOp(TSFlags) &&
         !RISCVII::hasVecPolicyOp(TSFlags) &&
         "Unexpected pseudo to transform to");
  (void)TSFlags;

  SmallVector<SDValue, 8> Ops;
  // Skip the merge operand at index 0 if IsTA.
  for (unsigned I = IsTA, E = N->getNumOperands(); I != E; I++) {
    // Skip the mask, the policy, and the Glue.
    SDValue Op = N->getOperand(I);
    if (I == MaskOpIdx || I == TailPolicyOpIdx ||
        Op.getValueType() == MVT::Glue)
      continue;
    Ops.push_back(Op);
  }

  // Transitively apply any node glued to our new node.
  if (auto *TGlued = Glued->getGluedNode())
    Ops.push_back(SDValue(TGlued, TGlued->getNumValues() - 1));

  SDNode *Result = CurDAG->getMachineNode(Opc, SDLoc(N), N->getVTList(), Ops);
  ReplaceUses(N, Result);

  return true;
}

// This pass converts a legalized DAG into a RISCV-specific DAG, ready
// for instruction scheduling.
FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM,
                                       CodeGenOpt::Level OptLevel) {
  return new RISCVDAGToDAGISel(TM, OptLevel);
}