//===-- RISCVISelDAGToDAG.cpp - A dag to dag inst selector for RISCV ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the RISCV target.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelDAGToDAG.h"
#include "MCTargetDesc/RISCVMCTargetDesc.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-isel"

void RISCVDAGToDAGISel::PostprocessISelDAG() {
  doPeepholeLoadStoreADDI();
}

static SDNode *selectImm(SelectionDAG *CurDAG, const SDLoc &DL, int64_t Imm,
                         MVT XLenVT) {
  RISCVMatInt::InstSeq Seq;
  RISCVMatInt::generateInstSeq(Imm, XLenVT == MVT::i64, Seq);

  SDNode *Result = nullptr;
  SDValue SrcReg = CurDAG->getRegister(RISCV::X0, XLenVT);
  for (RISCVMatInt::Inst &Inst : Seq) {
    SDValue SDImm = CurDAG->getTargetConstant(Inst.Imm, DL, XLenVT);
    if (Inst.Opc == RISCV::LUI)
      Result = CurDAG->getMachineNode(RISCV::LUI, DL, XLenVT, SDImm);
    else
      Result = CurDAG->getMachineNode(Inst.Opc, DL, XLenVT, SrcReg, SDImm);

    // Only the first instruction has X0 as its source.
    SrcReg = SDValue(Result, 0);
  }

  return Result;
}

// Returns true if the Node is an ISD::AND with a constant argument. If so,
// set Mask to that constant value.
static bool isConstantMask(SDNode *Node, uint64_t &Mask) {
  if (Node->getOpcode() == ISD::AND &&
      Node->getOperand(1).getOpcode() == ISD::Constant) {
    Mask = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    return true;
  }
  return false;
}

void RISCVDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we have already selected.
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  // Instruction Selection not handled by the auto-generated tablegen selection
  // should be handled here.
  unsigned Opcode = Node->getOpcode();
  MVT XLenVT = Subtarget->getXLenVT();
  SDLoc DL(Node);
  EVT VT = Node->getValueType(0);

  switch (Opcode) {
  case ISD::ADD: {
    // Optimize (add r, imm) to (addi (addi r, imm0), imm1) if applicable. The
    // immediate must be in specific ranges and have a single use.
    if (auto *ConstOp = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
      if (!(ConstOp->hasOneUse()))
        break;
      // The imm must be in range [-4096,-2049] or [2048,4094].
      int64_t Imm = ConstOp->getSExtValue();
      if (!(-4096 <= Imm && Imm <= -2049) && !(2048 <= Imm && Imm <= 4094))
        break;
      // Break the imm into imm0 + imm1.
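      // For example, imm == 4094 is split as 2047 + 2047, so neither half
      // exceeds the signed 12-bit ADDI immediate range [-2048, 2047].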
      EVT VT = Node->getValueType(0);
      const SDValue ImmOp0 = CurDAG->getTargetConstant(Imm - Imm / 2, DL, VT);
      const SDValue ImmOp1 = CurDAG->getTargetConstant(Imm / 2, DL, VT);
      auto *NodeAddi0 = CurDAG->getMachineNode(RISCV::ADDI, DL, VT,
                                               Node->getOperand(0), ImmOp0);
      auto *NodeAddi1 = CurDAG->getMachineNode(RISCV::ADDI, DL, VT,
                                               SDValue(NodeAddi0, 0), ImmOp1);
      ReplaceNode(Node, NodeAddi1);
      return;
    }
    break;
  }
  case ISD::Constant: {
    auto ConstNode = cast<ConstantSDNode>(Node);
    if (VT == XLenVT && ConstNode->isNullValue()) {
      SDValue New =
          CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL, RISCV::X0, XLenVT);
      ReplaceNode(Node, New.getNode());
      return;
    }
    int64_t Imm = ConstNode->getSExtValue();
    if (XLenVT == MVT::i64) {
      ReplaceNode(Node, selectImm(CurDAG, DL, Imm, XLenVT));
      return;
    }
    break;
  }
  case ISD::FrameIndex: {
    SDValue Imm = CurDAG->getTargetConstant(0, DL, XLenVT);
    int FI = cast<FrameIndexSDNode>(Node)->getIndex();
    SDValue TFI = CurDAG->getTargetFrameIndex(FI, VT);
    ReplaceNode(Node, CurDAG->getMachineNode(RISCV::ADDI, DL, VT, TFI, Imm));
    return;
  }
  case ISD::SRL: {
    if (!Subtarget->is64Bit())
      break;
    SDNode *Op0 = Node->getOperand(0).getNode();
    uint64_t Mask;
    // Match (srl (and val, mask), imm) where the result would be a
    // zero-extended 32-bit integer. i.e. the mask is 0xffffffff or the result
    // is equivalent to this (SimplifyDemandedBits may have removed lower bits
    // from the mask that aren't necessary due to the right-shifting).
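    // For example, (srl (and x, 0xffffff00), 8) is selected as (SRLIW x, 8),
    // since 0xffffff00 | maskTrailingOnes<uint64_t>(8) == 0xffffffff.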
    if (isa<ConstantSDNode>(Node->getOperand(1)) && isConstantMask(Op0, Mask)) {
      uint64_t ShAmt = Node->getConstantOperandVal(1);

      if ((Mask | maskTrailingOnes<uint64_t>(ShAmt)) == 0xffffffff) {
        SDValue ShAmtVal = CurDAG->getTargetConstant(ShAmt, DL, XLenVT);
        CurDAG->SelectNodeTo(Node, RISCV::SRLIW, XLenVT, Op0->getOperand(0),
                             ShAmtVal);
        return;
      }
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
    switch (IntNo) {
      // By default we do not custom select any intrinsic.
    default:
      break;

    case Intrinsic::riscv_vsetvli: {
      if (!Subtarget->hasStdExtV())
        break;

      assert(Node->getNumOperands() == 5);

      RISCVVSEW VSEW =
          static_cast<RISCVVSEW>(Node->getConstantOperandVal(3) & 0x7);
      RISCVVLMUL VLMul =
          static_cast<RISCVVLMUL>(Node->getConstantOperandVal(4) & 0x7);

      unsigned VTypeI = RISCVVType::encodeVTYPE(
          VLMul, VSEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
      SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);

      SDValue VLOperand = Node->getOperand(2);
      if (auto *C = dyn_cast<ConstantSDNode>(VLOperand)) {
        if (C->isNullValue()) {
          VLOperand = SDValue(
              CurDAG->getMachineNode(RISCV::ADDI, DL, XLenVT,
                                     CurDAG->getRegister(RISCV::X0, XLenVT),
                                     CurDAG->getTargetConstant(0, DL, XLenVT)),
              0);
        }
      }

      ReplaceNode(Node,
                  CurDAG->getMachineNode(RISCV::PseudoVSETVLI, DL, XLenVT,
                                         MVT::Other, VLOperand, VTypeIOp,
                                         /* Chain */ Node->getOperand(0)));
      return;
    }
    case Intrinsic::riscv_vsetvlimax: {
      if (!Subtarget->hasStdExtV())
        break;

      assert(Node->getNumOperands() == 4);

      RISCVVSEW VSEW =
          static_cast<RISCVVSEW>(Node->getConstantOperandVal(2) & 0x7);
      RISCVVLMUL VLMul =
          static_cast<RISCVVLMUL>(Node->getConstantOperandVal(3) & 0x7);

      unsigned VTypeI = RISCVVType::encodeVTYPE(
          VLMul, VSEW, /*TailAgnostic*/ true, /*MaskAgnostic*/ false);
      SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);

      SDValue VLOperand = CurDAG->getRegister(RISCV::X0, XLenVT);
      ReplaceNode(Node,
                  CurDAG->getMachineNode(RISCV::PseudoVSETVLI, DL, XLenVT,
                                         MVT::Other, VLOperand, VTypeIOp,
                                         /* Chain */ Node->getOperand(0)));
      return;
    }
    }
    break;
  }
  }

  // Select the default instruction.
  SelectCode(Node);
}

bool RISCVDAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, unsigned ConstraintID, std::vector<SDValue> &OutOps) {
  switch (ConstraintID) {
  case InlineAsm::Constraint_m:
    // We just support simple memory operands that have a single address
    // operand and need no special handling.
    OutOps.push_back(Op);
    return false;
  case InlineAsm::Constraint_A:
    OutOps.push_back(Op);
    return false;
  default:
    break;
  }

  return true;
}

bool RISCVDAGToDAGISel::SelectAddrFI(SDValue Addr, SDValue &Base) {
  if (auto FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
    Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), Subtarget->getXLenVT());
    return true;
  }
  return false;
}

// Check that it is a SLOI (Shift Left Ones Immediate). We first check that
// it is the right node tree:
//
// (OR (SHL RS1, VC2), VC1)
//
// and then we check that VC1, the mask used to fill with ones, is compatible
// with VC2, the shamt:
//
// VC1 == maskTrailingOnes<uint64_t>(VC2)
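//
// For example, with VC2 == 8 the expected mask is
// maskTrailingOnes<uint64_t>(8) == 0xff, so (OR (SHL RS1, 8), 0xff) matches.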

bool RISCVDAGToDAGISel::SelectSLOI(SDValue N, SDValue &RS1, SDValue &Shamt) {
  MVT XLenVT = Subtarget->getXLenVT();
  if (N.getOpcode() == ISD::OR) {
    SDValue Or = N;
    if (Or.getOperand(0).getOpcode() == ISD::SHL) {
      SDValue Shl = Or.getOperand(0);
      if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
          isa<ConstantSDNode>(Or.getOperand(1))) {
        if (XLenVT == MVT::i64) {
          uint64_t VC1 = Or.getConstantOperandVal(1);
          uint64_t VC2 = Shl.getConstantOperandVal(1);
          if (VC1 == maskTrailingOnes<uint64_t>(VC2)) {
            RS1 = Shl.getOperand(0);
            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                              Shl.getOperand(1).getValueType());
            return true;
          }
        }
        if (XLenVT == MVT::i32) {
          uint32_t VC1 = Or.getConstantOperandVal(1);
          uint32_t VC2 = Shl.getConstantOperandVal(1);
          if (VC1 == maskTrailingOnes<uint32_t>(VC2)) {
            RS1 = Shl.getOperand(0);
            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                              Shl.getOperand(1).getValueType());
            return true;
          }
        }
      }
    }
  }
  return false;
}

// Check that it is a SROI (Shift Right Ones Immediate). We first check that
// it is the right node tree:
//
// (OR (SRL RS1, VC2), VC1)
//
// and then we check that VC1, the mask used to fill with ones, is compatible
// with VC2, the shamt:
//
// VC1 == maskLeadingOnes<uint64_t>(VC2)

bool RISCVDAGToDAGISel::SelectSROI(SDValue N, SDValue &RS1, SDValue &Shamt) {
  MVT XLenVT = Subtarget->getXLenVT();
  if (N.getOpcode() == ISD::OR) {
    SDValue Or = N;
    if (Or.getOperand(0).getOpcode() == ISD::SRL) {
      SDValue Srl = Or.getOperand(0);
      if (isa<ConstantSDNode>(Srl.getOperand(1)) &&
          isa<ConstantSDNode>(Or.getOperand(1))) {
        if (XLenVT == MVT::i64) {
          uint64_t VC1 = Or.getConstantOperandVal(1);
          uint64_t VC2 = Srl.getConstantOperandVal(1);
          if (VC1 == maskLeadingOnes<uint64_t>(VC2)) {
            RS1 = Srl.getOperand(0);
            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                              Srl.getOperand(1).getValueType());
            return true;
          }
        }
        if (XLenVT == MVT::i32) {
          uint32_t VC1 = Or.getConstantOperandVal(1);
          uint32_t VC2 = Srl.getConstantOperandVal(1);
          if (VC1 == maskLeadingOnes<uint32_t>(VC2)) {
            RS1 = Srl.getOperand(0);
            Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                              Srl.getOperand(1).getValueType());
            return true;
          }
        }
      }
    }
  }
  return false;
}

// Check that it is a SLLIUW (Shift Logical Left Immediate Unsigned i32
// on RV64).
// SLLIUW is the same as SLLI except for the fact that it clears the bits
// XLEN-1:32 of the input RS1 before shifting.
// We first check that it is the right node tree:
//
// (AND (SHL RS1, VC2), VC1)
//
// We check that VC2, the shamt, is less than 32, otherwise the pattern is
// exactly the same as SLLI and we give priority to that.
// Finally, we check that VC1, the mask used to clear the upper 32 bits
// of RS1, is correct:
//
// VC1 == (0xFFFFFFFF << VC2)
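//
// For example, (AND (SHL RS1, 4), 0xFFFFFFFF0) matches with shamt 4, since
// 0xFFFFFFFF << 4 == 0xFFFFFFFF0.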

bool RISCVDAGToDAGISel::SelectSLLIUW(SDValue N, SDValue &RS1, SDValue &Shamt) {
  if (N.getOpcode() == ISD::AND && Subtarget->getXLenVT() == MVT::i64) {
    SDValue And = N;
    if (And.getOperand(0).getOpcode() == ISD::SHL) {
      SDValue Shl = And.getOperand(0);
      if (isa<ConstantSDNode>(Shl.getOperand(1)) &&
          isa<ConstantSDNode>(And.getOperand(1))) {
        uint64_t VC1 = And.getConstantOperandVal(1);
        uint64_t VC2 = Shl.getConstantOperandVal(1);
        if (VC2 < 32 && VC1 == ((uint64_t)0xFFFFFFFF << VC2)) {
          RS1 = Shl.getOperand(0);
          Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                            Shl.getOperand(1).getValueType());
          return true;
        }
      }
    }
  }
  return false;
}

// Check that it is a SLOIW (Shift Left Ones Immediate i32 on RV64).
// We first check that it is the right node tree:
//
// (SIGN_EXTEND_INREG (OR (SHL RS1, VC2), VC1))
//
// and then we check that VC1, the mask used to fill with ones, is compatible
// with VC2, the shamt:
//
// VC2 < 32
// VC1 == maskTrailingOnes<uint64_t>(VC2)

bool RISCVDAGToDAGISel::SelectSLOIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
  assert(Subtarget->is64Bit() && "SLOIW should only be matched on RV64");
  if (N.getOpcode() != ISD::SIGN_EXTEND_INREG ||
      cast<VTSDNode>(N.getOperand(1))->getVT() != MVT::i32)
    return false;

  SDValue Or = N.getOperand(0);

  if (Or.getOpcode() != ISD::OR || !isa<ConstantSDNode>(Or.getOperand(1)))
    return false;

  SDValue Shl = Or.getOperand(0);
  if (Shl.getOpcode() != ISD::SHL || !isa<ConstantSDNode>(Shl.getOperand(1)))
    return false;

  uint64_t VC1 = Or.getConstantOperandVal(1);
  uint64_t VC2 = Shl.getConstantOperandVal(1);

  if (VC2 >= 32 || VC1 != maskTrailingOnes<uint64_t>(VC2))
    return false;

  RS1 = Shl.getOperand(0);
  Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                    Shl.getOperand(1).getValueType());
  return true;
}

// Check that it is a SROIW (Shift Right Ones Immediate i32 on RV64).
// We first check that it is the right node tree:
//
// (OR (SRL RS1, VC2), VC1)
//
// and then we check that VC1, the mask used to fill with ones, is compatible
// with VC2, the shamt:
//
// VC2 < 32
// VC1 == maskTrailingZeros<uint64_t>(32 - VC2)
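//
// For example, with VC2 == 8 the expected mask is
// maskTrailingZeros<uint64_t>(24) == 0xFFFFFFFFFF000000.
//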
bool RISCVDAGToDAGISel::SelectSROIW(SDValue N, SDValue &RS1, SDValue &Shamt) {
  assert(Subtarget->is64Bit() && "SROIW should only be matched on RV64");
  if (N.getOpcode() != ISD::OR || !isa<ConstantSDNode>(N.getOperand(1)))
    return false;

  SDValue Srl = N.getOperand(0);
  if (Srl.getOpcode() != ISD::SRL || !isa<ConstantSDNode>(Srl.getOperand(1)))
    return false;

  uint64_t VC1 = N.getConstantOperandVal(1);
  uint64_t VC2 = Srl.getConstantOperandVal(1);

  if (VC2 >= 32 || VC1 != maskTrailingZeros<uint64_t>(32 - VC2))
    return false;

  RS1 = Srl.getOperand(0);
  Shamt = CurDAG->getTargetConstant(VC2, SDLoc(N),
                                    Srl.getOperand(1).getValueType());
  return true;
}

bool RISCVDAGToDAGISel::selectVSplat(SDValue N, SDValue &SplatVal) {
  if (N.getOpcode() != ISD::SPLAT_VECTOR &&
      N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64)
    return false;
  SplatVal = N.getOperand(0);
  return true;
}

bool RISCVDAGToDAGISel::selectVSplatSimm5(SDValue N, SDValue &SplatVal) {
  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64) ||
      !isa<ConstantSDNode>(N.getOperand(0)))
    return false;

  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();

  // Both ISD::SPLAT_VECTOR and RISCVISD::SPLAT_VECTOR_I64 share semantics when
  // the operand type is wider than the resulting vector element type: an
  // implicit truncation first takes place. Therefore, perform a manual
  // truncation/sign-extension in order to ignore any truncated bits and catch
  // any zero-extended immediate.
  // For example, we wish to match (i8 -1) -> (XLenVT 255) as a simm5 by first
  // sign-extending to (XLenVT -1).
  auto XLenVT = Subtarget->getXLenVT();
  assert(XLenVT == N.getOperand(0).getSimpleValueType() &&
         "Unexpected splat operand type");
  auto EltVT = N.getValueType().getVectorElementType();
  if (EltVT.bitsLT(XLenVT)) {
    SplatImm = SignExtend64(SplatImm, EltVT.getSizeInBits());
  }

  if (!isInt<5>(SplatImm))
    return false;

  SplatVal = CurDAG->getTargetConstant(SplatImm, SDLoc(N), XLenVT);
  return true;
}

bool RISCVDAGToDAGISel::selectVSplatUimm5(SDValue N, SDValue &SplatVal) {
  if ((N.getOpcode() != ISD::SPLAT_VECTOR &&
       N.getOpcode() != RISCVISD::SPLAT_VECTOR_I64) ||
      !isa<ConstantSDNode>(N.getOperand(0)))
    return false;

  int64_t SplatImm = cast<ConstantSDNode>(N.getOperand(0))->getSExtValue();

  if (!isUInt<5>(SplatImm))
    return false;

  SplatVal =
      CurDAG->getTargetConstant(SplatImm, SDLoc(N), Subtarget->getXLenVT());

  return true;
}

// Merge an ADDI into the offset of a load/store instruction where possible.
// (load (addi base, off1), off2) -> (load base, off1+off2)
// (store val, (addi base, off1), off2) -> (store val, base, off1+off2)
// This is possible when off1+off2 fits a 12-bit immediate.
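// For example, (LW (ADDI base, 8), 4) becomes (LW base, 12); the ADDI is then
// removed if it has no remaining uses.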
void RISCVDAGToDAGISel::doPeepholeLoadStoreADDI() {
  SelectionDAG::allnodes_iterator Position(CurDAG->getRoot().getNode());
  ++Position;

  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    // Skip dead nodes and any non-machine opcodes.
    if (N->use_empty() || !N->isMachineOpcode())
      continue;

    int OffsetOpIdx;
    int BaseOpIdx;

    // Only attempt this optimisation for I-type loads and S-type stores.
    switch (N->getMachineOpcode()) {
    default:
      continue;
    case RISCV::LB:
    case RISCV::LH:
    case RISCV::LW:
    case RISCV::LBU:
    case RISCV::LHU:
    case RISCV::LWU:
    case RISCV::LD:
    case RISCV::FLH:
    case RISCV::FLW:
    case RISCV::FLD:
      BaseOpIdx = 0;
      OffsetOpIdx = 1;
      break;
    case RISCV::SB:
    case RISCV::SH:
    case RISCV::SW:
    case RISCV::SD:
    case RISCV::FSH:
    case RISCV::FSW:
    case RISCV::FSD:
      BaseOpIdx = 1;
      OffsetOpIdx = 2;
      break;
    }

    if (!isa<ConstantSDNode>(N->getOperand(OffsetOpIdx)))
      continue;

    SDValue Base = N->getOperand(BaseOpIdx);

    // If the base is an ADDI, we can merge it in to the load/store.
    if (!Base.isMachineOpcode() || Base.getMachineOpcode() != RISCV::ADDI)
      continue;

    SDValue ImmOperand = Base.getOperand(1);
    uint64_t Offset2 = N->getConstantOperandVal(OffsetOpIdx);

    if (auto Const = dyn_cast<ConstantSDNode>(ImmOperand)) {
      int64_t Offset1 = Const->getSExtValue();
      int64_t CombinedOffset = Offset1 + Offset2;
      if (!isInt<12>(CombinedOffset))
        continue;
      ImmOperand = CurDAG->getTargetConstant(CombinedOffset, SDLoc(ImmOperand),
                                             ImmOperand.getValueType());
    } else if (auto GA = dyn_cast<GlobalAddressSDNode>(ImmOperand)) {
      // If the off1 in (addi base, off1) is a global variable's address (its
      // low part, really), then we can rely on the alignment of that variable
      // to provide a margin of safety before off1 can overflow the 12 bits.
      // Check if off2 falls within that margin; if so off1+off2 can't overflow.
      const DataLayout &DL = CurDAG->getDataLayout();
      Align Alignment = GA->getGlobal()->getPointerAlignment(DL);
      if (Offset2 != 0 && Alignment <= Offset2)
        continue;
      int64_t Offset1 = GA->getOffset();
      int64_t CombinedOffset = Offset1 + Offset2;
      ImmOperand = CurDAG->getTargetGlobalAddress(
          GA->getGlobal(), SDLoc(ImmOperand), ImmOperand.getValueType(),
          CombinedOffset, GA->getTargetFlags());
    } else if (auto CP = dyn_cast<ConstantPoolSDNode>(ImmOperand)) {
      // Ditto.
      Align Alignment = CP->getAlign();
      if (Offset2 != 0 && Alignment <= Offset2)
        continue;
      int64_t Offset1 = CP->getOffset();
      int64_t CombinedOffset = Offset1 + Offset2;
      ImmOperand = CurDAG->getTargetConstantPool(
          CP->getConstVal(), ImmOperand.getValueType(), CP->getAlign(),
          CombinedOffset, CP->getTargetFlags());
    } else {
      continue;
    }

    LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: ");
    LLVM_DEBUG(Base->dump(CurDAG));
    LLVM_DEBUG(dbgs() << "\nN: ");
    LLVM_DEBUG(N->dump(CurDAG));
    LLVM_DEBUG(dbgs() << "\n");

    // Modify the offset operand of the load/store.
    if (BaseOpIdx == 0) // Load
      CurDAG->UpdateNodeOperands(N, Base.getOperand(0), ImmOperand,
                                 N->getOperand(2));
    else // Store
      CurDAG->UpdateNodeOperands(N, N->getOperand(0), Base.getOperand(0),
                                 ImmOperand, N->getOperand(3));

    // The add-immediate may now be dead, in which case remove it.
    if (Base.getNode()->use_empty())
      CurDAG->RemoveDeadNode(Base.getNode());
  }
}

// This pass converts a legalized DAG into a RISCV-specific DAG, ready
// for instruction scheduling.
FunctionPass *llvm::createRISCVISelDag(RISCVTargetMachine &TM) {
  return new RISCVDAGToDAGISel(TM);
}