//===-- SystemZISelDAGToDAG.cpp - A dag to dag inst selector for SystemZ --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the SystemZ target.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetMachine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "systemz-isel"

namespace {
// Used to build addressing modes.
struct SystemZAddressingMode {
  // The shape of the address.
  enum AddrForm {
    // base+displacement
    FormBD,

    // base+displacement+index for load and store operands
    FormBDXNormal,

    // base+displacement+index for load address operands
    FormBDXLA,

    // base+displacement+index+ADJDYNALLOC
    FormBDXDynAlloc
  };
  AddrForm Form;

  // The type of displacement. The enum names here correspond directly
  // to the definitions in SystemZOperand.td. We could split them into
  // flags -- single/pair, 128-bit, etc. -- but it hardly seems worth it.
  enum DispRange {
    Disp12Only,
    Disp12Pair,
    Disp20Only,
    Disp20Only128,
    Disp20Pair
  };
  DispRange DR;

  // The parts of the address. The address is equivalent to:
  //
  //   Base + Disp + Index + (IncludesDynAlloc ? ADJDYNALLOC : 0)
  SDValue Base;
  int64_t Disp;
  SDValue Index;
  bool IncludesDynAlloc;

  SystemZAddressingMode(AddrForm form, DispRange dr)
      : Form(form), DR(dr), Base(), Disp(0), Index(),
        IncludesDynAlloc(false) {}

  // True if the address can have an index register.
  bool hasIndexField() { return Form != FormBD; }

  // True if the address can (and must) include ADJDYNALLOC.
  bool isDynAlloc() { return Form == FormBDXDynAlloc; }

  void dump() {
    errs() << "SystemZAddressingMode " << this << '\n';

    errs() << " Base ";
    if (Base.getNode())
      Base.getNode()->dump();
    else
      errs() << "null\n";

    if (hasIndexField()) {
      errs() << " Index ";
      if (Index.getNode())
        Index.getNode()->dump();
      else
        errs() << "null\n";
    }

    errs() << " Disp " << Disp;
    if (IncludesDynAlloc)
      errs() << " + ADJDYNALLOC";
    errs() << '\n';
  }
};

// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
  assert(Count <= 64);
  if (Count > 63)
    return UINT64_MAX;
  return (uint64_t(1) << Count) - 1;
}

// Represents operands 2 to 5 of the ROTATE AND ... SELECTED BITS operation
// given by Opcode. The operands are: Input (R2), Start (I3), End (I4) and
// Rotate (I5). The combined operand value is effectively:
//
//   (or (rotl Input, Rotate), ~Mask)
//
// for RNSBG and:
//
//   (and (rotl Input, Rotate), Mask)
//
// otherwise. The output value has BitSize bits, although Input may be
// narrower (in which case the upper bits are don't care), or wider (in which
// case the result will be truncated as part of the operation).
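//
// Illustrative example (added commentary, not taken from the original
// comments): assuming the usual z/Architecture MSB-first bit numbering,
// a 64-bit node matching
//
//   (and (rotl X, 4), 0x00000000FFFFFF00)
//
// would be described here as Input = X, Rotate = 4,
// Mask = 0x00000000FFFFFF00, Start = 32 and End = 55.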
struct RxSBGOperands {
  RxSBGOperands(unsigned Op, SDValue N)
      : Opcode(Op), BitSize(N.getValueSizeInBits()),
        Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63),
        Rotate(0) {}

  unsigned Opcode;
  unsigned BitSize;
  uint64_t Mask;
  SDValue Input;
  unsigned Start;
  unsigned End;
  unsigned Rotate;
};

class SystemZDAGToDAGISel : public SelectionDAGISel {
  const SystemZSubtarget *Subtarget;

  // Used by SystemZOperands.td to create integer constants.
  inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
    return CurDAG->getTargetConstant(Imm, SDLoc(Node), Node->getValueType(0));
  }

  const SystemZTargetMachine &getTargetMachine() const {
    return static_cast<const SystemZTargetMachine &>(TM);
  }

  const SystemZInstrInfo *getInstrInfo() const {
    return Subtarget->getInstrInfo();
  }

  // Try to fold more of the base or index of AM into AM, where IsBase
  // selects between the base and index.
  bool expandAddress(SystemZAddressingMode &AM, bool IsBase) const;

  // Try to describe N in AM, returning true on success.
  bool selectAddress(SDValue N, SystemZAddressingMode &AM) const;

  // Extract individual target operands from matched address AM.
  void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
                          SDValue &Base, SDValue &Disp) const;
  void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
                          SDValue &Base, SDValue &Disp, SDValue &Index) const;

  // Try to match Addr as a FormBD address with displacement type DR.
  // Return true on success, storing the base and displacement in
  // Base and Disp respectively.
  bool selectBDAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
                    SDValue &Base, SDValue &Disp) const;

  // Try to match Addr as a FormBDX address with displacement type DR.
  // Return true on success and if the result had no index. Store the
  // base and displacement in Base and Disp respectively.
  bool selectMVIAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
                     SDValue &Base, SDValue &Disp) const;

  // Try to match Addr as a FormBDX* address of form Form with
  // displacement type DR. Return true on success, storing the base,
  // displacement and index in Base, Disp and Index respectively.
  bool selectBDXAddr(SystemZAddressingMode::AddrForm Form,
                     SystemZAddressingMode::DispRange DR, SDValue Addr,
                     SDValue &Base, SDValue &Disp, SDValue &Index) const;

  // PC-relative address matching routines used by SystemZOperands.td.
  bool selectPCRelAddress(SDValue Addr, SDValue &Target) const {
    if (SystemZISD::isPCREL(Addr.getOpcode())) {
      Target = Addr.getOperand(0);
      return true;
    }
    return false;
  }

  // BD matching routines used by SystemZOperands.td.
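  // (Illustrative note, not from the original source: the "Pair" ranges
  // model instructions that exist in both a 12-bit unsigned and a 20-bit
  // signed displacement encoding -- for example L and LY -- so isValidDisp
  // below can pick whichever form encodes a given displacement.)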
  bool selectBDAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp12Only, Addr, Base, Disp);
  }
  bool selectBDAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
  }
  bool selectBDAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp20Only, Addr, Base, Disp);
  }
  bool selectBDAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
  }

  // MVI matching routines used by SystemZOperands.td.
  bool selectMVIAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectMVIAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
  }
  bool selectMVIAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectMVIAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
  }

  // BDX matching routines used by SystemZOperands.td.
  bool selectBDXAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp12Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp12Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectDynAlloc12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                            SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXDynAlloc,
                         SystemZAddressingMode::Disp12Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Only128(SDValue Addr, SDValue &Base, SDValue &Disp,
                              SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Only128,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectLAAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                          SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
                         SystemZAddressingMode::Disp12Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectLAAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                          SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
                         SystemZAddressingMode::Disp20Pair,
                         Addr, Base, Disp, Index);
  }

  // Try to match Addr as an address with a base, 12-bit displacement
  // and index, where the index is element Elem of a vector.
  // Return true on success, storing the base, displacement and vector
  // in Base, Disp and Index respectively.
  bool selectBDVAddr12Only(SDValue Addr, SDValue Elem, SDValue &Base,
                           SDValue &Disp, SDValue &Index) const;

  // Check whether (or Op (and X InsertMask)) is effectively an insertion
  // of X into bits InsertMask of some Y != Op. Return true if so and
  // set Op to that Y.
  bool detectOrAndInsertion(SDValue &Op, uint64_t InsertMask) const;

  // Try to update RxSBG so that only the bits of RxSBG.Input in Mask are used.
  // Return true on success.
  bool refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask) const;

  // Try to fold some of RxSBG.Input into other fields of RxSBG.
  // Return true on success.
  bool expandRxSBG(RxSBGOperands &RxSBG) const;

  // Return an undefined value of type VT.
  SDValue getUNDEF(const SDLoc &DL, EVT VT) const;

  // Convert N to VT, if it isn't already.
  SDValue convertTo(const SDLoc &DL, EVT VT, SDValue N) const;

  // Try to implement AND or shift node N using RISBG with the zero flag set.
  // Return true on success.
  bool tryRISBGZero(SDNode *N);

  // Try to use RISBG or Opcode to implement OR or XOR node N.
  // Return true on success.
  bool tryRxSBG(SDNode *N, unsigned Opcode);

  // If Op0 is null, then Node is a constant that can be loaded using:
  //
  //   (Opcode UpperVal LowerVal)
  //
  // If Op0 is nonnull, then Node can be implemented using:
  //
  //   (Opcode (Opcode Op0 UpperVal) LowerVal)
  void splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
                           uint64_t UpperVal, uint64_t LowerVal);

  // Try to use gather instruction Opcode to implement vector insertion N.
  bool tryGather(SDNode *N, unsigned Opcode);

  // Try to use scatter instruction Opcode to implement store Store.
  bool tryScatter(StoreSDNode *Store, unsigned Opcode);

  // Return true if Load and Store are loads and stores of the same size
  // and are guaranteed not to overlap. Such operations can be implemented
  // using block (SS-format) instructions.
  //
  // Partial overlap would lead to incorrect code, since the block operations
  // are logically bytewise, even though they have a fast path for the
  // non-overlapping case. We also need to avoid full overlap (i.e. two
  // addresses that might be equal at run time) because although that case
  // would be handled correctly, it might be implemented by millicode.
  bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load) const;

  // N is a (store (load Y), X) pattern. Return true if it can use an MVC
  // from Y to X.
  bool storeLoadCanUseMVC(SDNode *N) const;

  // N is a (store (op (load A[0]), (load A[1])), X) pattern. Return true
  // if A[1 - I] == X and if N can use a block operation like NC from A[I]
  // to X.
  bool storeLoadCanUseBlockBinary(SDNode *N, unsigned I) const;

public:
  SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(TM, OptLevel) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    Subtarget = &MF.getSubtarget<SystemZSubtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  // Override MachineFunctionPass.
  StringRef getPassName() const override {
    return "SystemZ DAG->DAG Pattern Instruction Selection";
  }

  // Override SelectionDAGISel.
  void Select(SDNode *Node) override;
  bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;

  // Include the pieces autogenerated from the target description.
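  // (Added note, not from the original: SystemZGenDAGISel.inc is generated
  // by TableGen from the SystemZ .td files and supplies the matcher tables
  // and the SelectCode() routine invoked at the end of Select() below.)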
#include "SystemZGenDAGISel.inc"
};
} // end anonymous namespace

FunctionPass *llvm::createSystemZISelDag(SystemZTargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new SystemZDAGToDAGISel(TM, OptLevel);
}

// Return true if Val should be selected as a displacement for an address
// with range DR. Here we're interested in the range of both the instruction
// described by DR and of any pairing instruction.
static bool selectDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
  switch (DR) {
  case SystemZAddressingMode::Disp12Only:
    return isUInt<12>(Val);

  case SystemZAddressingMode::Disp12Pair:
  case SystemZAddressingMode::Disp20Only:
  case SystemZAddressingMode::Disp20Pair:
    return isInt<20>(Val);

  case SystemZAddressingMode::Disp20Only128:
    return isInt<20>(Val) && isInt<20>(Val + 8);
  }
  llvm_unreachable("Unhandled displacement range");
}

// Change the base or index in AM to Value, where IsBase selects
// between the base and index.
static void changeComponent(SystemZAddressingMode &AM, bool IsBase,
                            SDValue Value) {
  if (IsBase)
    AM.Base = Value;
  else
    AM.Index = Value;
}

// The base or index of AM is equivalent to Value + ADJDYNALLOC,
// where IsBase selects between the base and index. Try to fold the
// ADJDYNALLOC into AM.
static bool expandAdjDynAlloc(SystemZAddressingMode &AM, bool IsBase,
                              SDValue Value) {
  if (AM.isDynAlloc() && !AM.IncludesDynAlloc) {
    changeComponent(AM, IsBase, Value);
    AM.IncludesDynAlloc = true;
    return true;
  }
  return false;
}

// The base of AM is equivalent to Base + Index. Try to use Index as
// the index register.
static bool expandIndex(SystemZAddressingMode &AM, SDValue Base,
                        SDValue Index) {
  if (AM.hasIndexField() && !AM.Index.getNode()) {
    AM.Base = Base;
    AM.Index = Index;
    return true;
  }
  return false;
}

// The base or index of AM is equivalent to Op0 + Op1, where IsBase selects
// between the base and index. Try to fold Op1 into AM's displacement.
static bool expandDisp(SystemZAddressingMode &AM, bool IsBase,
                       SDValue Op0, uint64_t Op1) {
  // First try adjusting the displacement.
  int64_t TestDisp = AM.Disp + Op1;
  if (selectDisp(AM.DR, TestDisp)) {
    changeComponent(AM, IsBase, Op0);
    AM.Disp = TestDisp;
    return true;
  }

  // We could consider forcing the displacement into a register and
  // using it as an index, but it would need to be carefully tuned.
  return false;
}
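// Illustrative example (added commentary, not from the original code): with
// an empty Disp12Only mode, folding the address (add %base, 4092) reaches
// expandDisp with Op1 == 4092; isUInt<12>(4092) holds, so the mode becomes
// Base = %base, Disp = 4092. A 20-bit range would also accept negative
// displacements down to -524288 and values up to 524287.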
bool SystemZDAGToDAGISel::expandAddress(SystemZAddressingMode &AM,
                                        bool IsBase) const {
  SDValue N = IsBase ? AM.Base : AM.Index;
  unsigned Opcode = N.getOpcode();
  if (Opcode == ISD::TRUNCATE) {
    N = N.getOperand(0);
    Opcode = N.getOpcode();
  }
  if (Opcode == ISD::ADD || CurDAG->isBaseWithConstantOffset(N)) {
    SDValue Op0 = N.getOperand(0);
    SDValue Op1 = N.getOperand(1);

    unsigned Op0Code = Op0->getOpcode();
    unsigned Op1Code = Op1->getOpcode();

    if (Op0Code == SystemZISD::ADJDYNALLOC)
      return expandAdjDynAlloc(AM, IsBase, Op1);
    if (Op1Code == SystemZISD::ADJDYNALLOC)
      return expandAdjDynAlloc(AM, IsBase, Op0);

    if (Op0Code == ISD::Constant)
      return expandDisp(AM, IsBase, Op1,
                        cast<ConstantSDNode>(Op0)->getSExtValue());
    if (Op1Code == ISD::Constant)
      return expandDisp(AM, IsBase, Op0,
                        cast<ConstantSDNode>(Op1)->getSExtValue());

    if (IsBase && expandIndex(AM, Op0, Op1))
      return true;
  }
  if (Opcode == SystemZISD::PCREL_OFFSET) {
    SDValue Full = N.getOperand(0);
    SDValue Base = N.getOperand(1);
    SDValue Anchor = Base.getOperand(0);
    uint64_t Offset = (cast<GlobalAddressSDNode>(Full)->getOffset() -
                       cast<GlobalAddressSDNode>(Anchor)->getOffset());
    return expandDisp(AM, IsBase, Base, Offset);
  }
  return false;
}

// Return true if an instruction with displacement range DR should be
// used for displacement value Val. selectDisp(DR, Val) must already hold.
static bool isValidDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
  assert(selectDisp(DR, Val) && "Invalid displacement");
  switch (DR) {
  case SystemZAddressingMode::Disp12Only:
  case SystemZAddressingMode::Disp20Only:
  case SystemZAddressingMode::Disp20Only128:
    return true;

  case SystemZAddressingMode::Disp12Pair:
    // Use the other instruction if the displacement is too large.
    return isUInt<12>(Val);

  case SystemZAddressingMode::Disp20Pair:
    // Use the other instruction if the displacement is small enough.
    return !isUInt<12>(Val);
  }
  llvm_unreachable("Unhandled displacement range");
}

// Return true if Base + Disp + Index should be performed by LA(Y).
static bool shouldUseLA(SDNode *Base, int64_t Disp, SDNode *Index) {
  // Don't use LA(Y) for constants.
  if (!Base)
    return false;

  // Always use LA(Y) for frame addresses, since we know that the destination
  // register is almost always (perhaps always) going to be different from
  // the frame register.
  if (Base->getOpcode() == ISD::FrameIndex)
    return true;

  if (Disp) {
    // Always use LA(Y) if there is a base, displacement and index.
    if (Index)
      return true;

    // Always use LA if the displacement is small enough. It should always
    // be no worse than AGHI (and better if it avoids a move).
    if (isUInt<12>(Disp))
      return true;

    // For similar reasons, always use LAY if the constant is too big for AGHI.
    // LAY should be no worse than AGFI.
    if (!isInt<16>(Disp))
      return true;
  } else {
    // Don't use LA for plain registers.
    if (!Index)
      return false;

    // Don't use LA for plain addition if the index operand is only used
    // once. It should be a natural two-operand addition in that case.
    if (Index->hasOneUse())
      return false;

    // Prefer addition if the second operation is sign-extended, in the
    // hope of using AGF.
    unsigned IndexOpcode = Index->getOpcode();
    if (IndexOpcode == ISD::SIGN_EXTEND ||
        IndexOpcode == ISD::SIGN_EXTEND_INREG)
      return false;
  }

  // Don't use LA for two-operand addition if either operand is only
  // used once. The addition instructions are better in that case.
  if (Base->hasOneUse())
    return false;

  return true;
}

// Return true if Addr is suitable for AM, updating AM if so.
bool SystemZDAGToDAGISel::selectAddress(SDValue Addr,
                                        SystemZAddressingMode &AM) const {
  // Start out assuming that the address will need to be loaded separately,
  // then try to extend it as much as we can.
  AM.Base = Addr;

  // First try treating the address as a constant.
  if (Addr.getOpcode() == ISD::Constant &&
      expandDisp(AM, true, SDValue(),
                 cast<ConstantSDNode>(Addr)->getSExtValue()))
    ;
  // Also see if it's a bare ADJDYNALLOC.
  else if (Addr.getOpcode() == SystemZISD::ADJDYNALLOC &&
           expandAdjDynAlloc(AM, true, SDValue()))
    ;
  else
    // Otherwise try expanding each component.
    while (expandAddress(AM, true) ||
           (AM.Index.getNode() && expandAddress(AM, false)))
      continue;

  // Reject cases where it isn't profitable to use LA(Y).
  if (AM.Form == SystemZAddressingMode::FormBDXLA &&
      !shouldUseLA(AM.Base.getNode(), AM.Disp, AM.Index.getNode()))
    return false;

  // Reject cases where the other instruction in a pair should be used.
  if (!isValidDisp(AM.DR, AM.Disp))
    return false;

  // Make sure that ADJDYNALLOC is included where necessary.
  if (AM.isDynAlloc() && !AM.IncludesDynAlloc)
    return false;

  DEBUG(AM.dump());
  return true;
}

// Insert a node into the DAG at least before Pos. This will reposition
// the node as needed, and will assign it a node ID that is <= Pos's ID.
// Note that this does *not* preserve the uniqueness of node IDs!
// The selection DAG must no longer depend on their uniqueness when this
// function is used.
static void insertDAGNode(SelectionDAG *DAG, SDNode *Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos->getNodeId()) {
    DAG->RepositionNode(Pos->getIterator(), N.getNode());
    N.getNode()->setNodeId(Pos->getNodeId());
  }
}

void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
                                             EVT VT, SDValue &Base,
                                             SDValue &Disp) const {
  Base = AM.Base;
  if (!Base.getNode())
    // Register 0 means "no base". This is mostly useful for shifts.
    Base = CurDAG->getRegister(0, VT);
  else if (Base.getOpcode() == ISD::FrameIndex) {
    // Lower a FrameIndex to a TargetFrameIndex.
    int64_t FrameIndex = cast<FrameIndexSDNode>(Base)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FrameIndex, VT);
  } else if (Base.getValueType() != VT) {
    // Truncate values from i64 to i32, for shifts.
    assert(VT == MVT::i32 && Base.getValueType() == MVT::i64 &&
           "Unexpected truncation");
    SDLoc DL(Base);
    SDValue Trunc = CurDAG->getNode(ISD::TRUNCATE, DL, VT, Base);
    insertDAGNode(CurDAG, Base.getNode(), Trunc);
    Base = Trunc;
  }

  // Lower the displacement to a TargetConstant.
  Disp = CurDAG->getTargetConstant(AM.Disp, SDLoc(Base), VT);
}

void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
                                             EVT VT, SDValue &Base,
                                             SDValue &Disp,
                                             SDValue &Index) const {
  getAddressOperands(AM, VT, Base, Disp);

  Index = AM.Index;
  if (!Index.getNode())
    // Register 0 means "no index".
    Index = CurDAG->getRegister(0, VT);
}

bool SystemZDAGToDAGISel::selectBDAddr(SystemZAddressingMode::DispRange DR,
                                       SDValue Addr, SDValue &Base,
                                       SDValue &Disp) const {
  SystemZAddressingMode AM(SystemZAddressingMode::FormBD, DR);
  if (!selectAddress(Addr, AM))
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp);
  return true;
}

bool SystemZDAGToDAGISel::selectMVIAddr(SystemZAddressingMode::DispRange DR,
                                        SDValue Addr, SDValue &Base,
                                        SDValue &Disp) const {
  SystemZAddressingMode AM(SystemZAddressingMode::FormBDXNormal, DR);
  if (!selectAddress(Addr, AM) || AM.Index.getNode())
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp);
  return true;
}

bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form,
                                        SystemZAddressingMode::DispRange DR,
                                        SDValue Addr, SDValue &Base,
                                        SDValue &Disp, SDValue &Index) const {
  SystemZAddressingMode AM(Form, DR);
  if (!selectAddress(Addr, AM))
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp, Index);
  return true;
}

bool SystemZDAGToDAGISel::selectBDVAddr12Only(SDValue Addr, SDValue Elem,
                                              SDValue &Base,
                                              SDValue &Disp,
                                              SDValue &Index) const {
  SDValue Regs[2];
  if (selectBDXAddr12Only(Addr, Regs[0], Disp, Regs[1]) &&
      Regs[0].getNode() && Regs[1].getNode()) {
    for (unsigned int I = 0; I < 2; ++I) {
      Base = Regs[I];
      Index = Regs[1 - I];
      // We can't tell here whether the index vector has the right type
      // for the access; the caller needs to do that instead.
      if (Index.getOpcode() == ISD::ZERO_EXTEND)
        Index = Index.getOperand(0);
      if (Index.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          Index.getOperand(1) == Elem) {
        Index = Index.getOperand(0);
        return true;
      }
    }
  }
  return false;
}

bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
                                               uint64_t InsertMask) const {
  // We're only interested in cases where the insertion is into some operand
  // of Op, rather than into Op itself. The only useful case is an AND.
  if (Op.getOpcode() != ISD::AND)
    return false;

  // We need a constant mask.
  auto *MaskNode = dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
  if (!MaskNode)
    return false;

  // It's not an insertion of Op.getOperand(0) if the two masks overlap.
  uint64_t AndMask = MaskNode->getZExtValue();
  if (InsertMask & AndMask)
    return false;

  // It's only an insertion if all bits are covered or are known to be zero.
  // The inner check covers all cases but is more expensive.
  uint64_t Used = allOnes(Op.getValueSizeInBits());
  if (Used != (AndMask | InsertMask)) {
    KnownBits Known;
    CurDAG->computeKnownBits(Op.getOperand(0), Known);
    if (Used != (AndMask | InsertMask | Known.Zero.getZExtValue()))
      return false;
  }

  Op = Op.getOperand(0);
  return true;
}

bool SystemZDAGToDAGISel::refineRxSBGMask(RxSBGOperands &RxSBG,
                                          uint64_t Mask) const {
  const SystemZInstrInfo *TII = getInstrInfo();
  if (RxSBG.Rotate != 0)
    Mask = (Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate));
  Mask &= RxSBG.Mask;
  if (TII->isRxSBGMask(Mask, RxSBG.BitSize, RxSBG.Start, RxSBG.End)) {
    RxSBG.Mask = Mask;
    return true;
  }
  return false;
}

// Return true if any bits of (RxSBG.Input & Mask) are significant.
static bool maskMatters(RxSBGOperands &RxSBG, uint64_t Mask) {
  // Rotate the mask in the same way as RxSBG.Input is rotated.
  if (RxSBG.Rotate != 0)
    Mask = ((Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)));
  return (Mask & RxSBG.Mask) != 0;
}

bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
  SDValue N = RxSBG.Input;
  unsigned Opcode = N.getOpcode();
  switch (Opcode) {
  case ISD::TRUNCATE: {
    if (RxSBG.Opcode == SystemZ::RNSBG)
      return false;
    uint64_t BitSize = N.getValueSizeInBits();
    uint64_t Mask = allOnes(BitSize);
    if (!refineRxSBGMask(RxSBG, Mask))
      return false;
    RxSBG.Input = N.getOperand(0);
    return true;
  }
  case ISD::AND: {
    if (RxSBG.Opcode == SystemZ::RNSBG)
      return false;

    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!MaskNode)
      return false;

    SDValue Input = N.getOperand(0);
    uint64_t Mask = MaskNode->getZExtValue();
    if (!refineRxSBGMask(RxSBG, Mask)) {
      // If some bits of Input are already known zeros, those bits will have
      // been removed from the mask. See if adding them back in makes the
      // mask suitable.
      KnownBits Known;
      CurDAG->computeKnownBits(Input, Known);
      Mask |= Known.Zero.getZExtValue();
      if (!refineRxSBGMask(RxSBG, Mask))
        return false;
    }
    RxSBG.Input = Input;
    return true;
  }

  case ISD::OR: {
    if (RxSBG.Opcode != SystemZ::RNSBG)
      return false;

    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!MaskNode)
      return false;

    SDValue Input = N.getOperand(0);
    uint64_t Mask = ~MaskNode->getZExtValue();
    if (!refineRxSBGMask(RxSBG, Mask)) {
      // If some bits of Input are already known ones, those bits will have
      // been removed from the mask. See if adding them back in makes the
      // mask suitable.
      KnownBits Known;
      CurDAG->computeKnownBits(Input, Known);
      Mask &= ~Known.One.getZExtValue();
      if (!refineRxSBGMask(RxSBG, Mask))
        return false;
    }
    RxSBG.Input = Input;
    return true;
  }

  case ISD::ROTL: {
    // Any 64-bit rotate left can be merged into the RxSBG.
    if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
      return false;
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    RxSBG.Rotate = (RxSBG.Rotate + CountNode->getZExtValue()) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::ANY_EXTEND:
    // Bits above the extended operand are don't-care.
    RxSBG.Input = N.getOperand(0);
    return true;

  case ISD::ZERO_EXTEND:
    if (RxSBG.Opcode != SystemZ::RNSBG) {
      // Restrict the mask to the extended operand.
      unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
      if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))
        return false;

      RxSBG.Input = N.getOperand(0);
      return true;
    }
    LLVM_FALLTHROUGH;

  case ISD::SIGN_EXTEND: {
    // Check that the extension bits are don't-care (i.e. are masked out
    // by the final mask).
    unsigned BitSize = N.getValueSizeInBits();
    unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
    if (maskMatters(RxSBG, allOnes(BitSize) - allOnes(InnerBitSize))) {
      // In the case where only the sign bit is active, increase Rotate with
      // the extension width.
      if (RxSBG.Mask == 1 && RxSBG.Rotate == 1)
        RxSBG.Rotate += (BitSize - InnerBitSize);
      else
        return false;
    }

    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::SHL: {
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    uint64_t Count = CountNode->getZExtValue();
    unsigned BitSize = N.getValueSizeInBits();
    if (Count < 1 || Count >= BitSize)
      return false;

    if (RxSBG.Opcode == SystemZ::RNSBG) {
      // Treat (shl X, count) as (rotl X, count) as long as the bottom
      // count bits from RxSBG.Input are ignored.
      if (maskMatters(RxSBG, allOnes(Count)))
        return false;
    } else {
      // Treat (shl X, count) as (and (rotl X, count), ~0<<count).
      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count) << Count))
        return false;
    }

    RxSBG.Rotate = (RxSBG.Rotate + Count) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::SRL:
  case ISD::SRA: {
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    uint64_t Count = CountNode->getZExtValue();
    unsigned BitSize = N.getValueSizeInBits();
    if (Count < 1 || Count >= BitSize)
      return false;

    if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) {
      // Treat (srl|sra X, count) as (rotl X, size-count) as long as the top
      // count bits from RxSBG.Input are ignored.
      if (maskMatters(RxSBG, allOnes(Count) << (BitSize - Count)))
        return false;
    } else {
      // Treat (srl X, count) as (and (rotl X, size-count), ~0>>count),
      // which is similar to SLL above.
      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count)))
        return false;
    }

    RxSBG.Rotate = (RxSBG.Rotate - Count) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }
  default:
    return false;
  }
}

SDValue SystemZDAGToDAGISel::getUNDEF(const SDLoc &DL, EVT VT) const {
  SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
  return SDValue(N, 0);
}

SDValue SystemZDAGToDAGISel::convertTo(const SDLoc &DL, EVT VT,
                                       SDValue N) const {
  if (N.getValueType() == MVT::i32 && VT == MVT::i64)
    return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32,
                                         DL, VT, getUNDEF(DL, MVT::i64), N);
  if (N.getValueType() == MVT::i64 && VT == MVT::i32)
    return CurDAG->getTargetExtractSubreg(SystemZ::subreg_l32, DL, VT, N);
  assert(N.getValueType() == VT && "Unexpected value types");
  return N;
}

bool SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getSizeInBits() > 64)
    return false;
  RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
  unsigned Count = 0;
  while (expandRxSBG(RISBG))
    // The widening or narrowing is expected to be free.
    // Counting widening or narrowing as a saved operation will result in
    // preferring an R*SBG over a simple shift/logical instruction.
    if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND &&
        RISBG.Input.getOpcode() != ISD::TRUNCATE)
      Count += 1;
  if (Count == 0)
    return false;

  // Prefer to use normal shift instructions over RISBG, since they can handle
  // all cases and are sometimes shorter.
  if (Count == 1 && N->getOpcode() != ISD::AND)
    return false;

  // Prefer register extensions like LLC over RISBG. Also prefer to start
  // out with normal ANDs if one instruction would be enough. We can convert
  // these ANDs into an RISBG later if a three-address instruction is useful.
  if (RISBG.Rotate == 0) {
    bool PreferAnd = false;
    // Prefer AND for any 32-bit and-immediate operation.
    if (VT == MVT::i32)
      PreferAnd = true;
    // As well as for any 64-bit operation that can be implemented via LLC(R),
    // LLH(R), LLGT(R), or one of the and-immediate instructions.
    else if (RISBG.Mask == 0xff ||
             RISBG.Mask == 0xffff ||
             RISBG.Mask == 0x7fffffff ||
             SystemZ::isImmLF(~RISBG.Mask) ||
             SystemZ::isImmHF(~RISBG.Mask))
      PreferAnd = true;
    // And likewise for the LLZRGF instruction, which doesn't have a register
    // to register version.
    else if (auto *Load = dyn_cast<LoadSDNode>(RISBG.Input)) {
      if (Load->getMemoryVT() == MVT::i32 &&
          (Load->getExtensionType() == ISD::EXTLOAD ||
           Load->getExtensionType() == ISD::ZEXTLOAD) &&
          RISBG.Mask == 0xffffff00 &&
          Subtarget->hasLoadAndZeroRightmostByte())
        PreferAnd = true;
    }
    if (PreferAnd) {
      // Replace the current node with an AND. Note that the current node
      // might already be that same AND, in which case it is already CSE'd
      // with it, and we must not call ReplaceNode.
      SDValue In = convertTo(DL, VT, RISBG.Input);
      SDValue Mask = CurDAG->getConstant(RISBG.Mask, DL, VT);
      SDValue New = CurDAG->getNode(ISD::AND, DL, VT, In, Mask);
      if (N != New.getNode()) {
        insertDAGNode(CurDAG, N, Mask);
        insertDAGNode(CurDAG, N, New);
        ReplaceNode(N, New.getNode());
        N = New.getNode();
      }
      // Now, select the machine opcode to implement this operation.
      SelectCode(N);
      return true;
    }
  }

  unsigned Opcode = SystemZ::RISBG;
  // Prefer RISBGN if available, since it does not clobber CC.
  if (Subtarget->hasMiscellaneousExtensions())
    Opcode = SystemZ::RISBGN;
  EVT OpcodeVT = MVT::i64;
  if (VT == MVT::i32 && Subtarget->hasHighWord() &&
      // We can only use the 32-bit instructions if all source bits are
      // in the low 32 bits without wrapping, both after rotation (because
      // of the smaller range for Start and End) and before rotation
      // (because the input value is truncated).
      RISBG.Start >= 32 && RISBG.End >= RISBG.Start &&
      ((RISBG.Start + RISBG.Rotate) & 63) >= 32 &&
      ((RISBG.End + RISBG.Rotate) & 63) >=
          ((RISBG.Start + RISBG.Rotate) & 63)) {
    Opcode = SystemZ::RISBMux;
    OpcodeVT = MVT::i32;
    RISBG.Start &= 31;
    RISBG.End &= 31;
  }
  SDValue Ops[5] = {
    getUNDEF(DL, OpcodeVT),
    convertTo(DL, OpcodeVT, RISBG.Input),
    CurDAG->getTargetConstant(RISBG.Start, DL, MVT::i32),
    CurDAG->getTargetConstant(RISBG.End | 128, DL, MVT::i32),
    CurDAG->getTargetConstant(RISBG.Rotate, DL, MVT::i32)
  };
  SDValue New = convertTo(
      DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, OpcodeVT, Ops), 0));
  ReplaceUses(N, New.getNode());
  CurDAG->RemoveDeadNode(N);
  return true;
}

bool SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getSizeInBits() > 64)
    return false;
  // Try treating each operand of N as the second operand of the RxSBG
  // and see which goes deepest.
  RxSBGOperands RxSBG[] = {
    RxSBGOperands(Opcode, N->getOperand(0)),
    RxSBGOperands(Opcode, N->getOperand(1))
  };
  unsigned Count[] = { 0, 0 };
  for (unsigned I = 0; I < 2; ++I)
    while (expandRxSBG(RxSBG[I]))
      // The widening or narrowing is expected to be free.
      // Counting widening or narrowing as a saved operation will result in
      // preferring an R*SBG over a simple shift/logical instruction.
      if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND &&
          RxSBG[I].Input.getOpcode() != ISD::TRUNCATE)
        Count[I] += 1;

  // Do nothing if neither operand is suitable.
  if (Count[0] == 0 && Count[1] == 0)
    return false;

  // Pick the deepest second operand.
  unsigned I = Count[0] > Count[1] ? 0 : 1;
  SDValue Op0 = N->getOperand(I ^ 1);

  // Prefer IC for character insertions from memory.
  if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
    if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
      if (Load->getMemoryVT() == MVT::i8)
        return false;

  // See whether we can avoid an AND in the first operand by converting
  // ROSBG to RISBG.
  if (Opcode == SystemZ::ROSBG && detectOrAndInsertion(Op0, RxSBG[I].Mask)) {
    Opcode = SystemZ::RISBG;
    // Prefer RISBGN if available, since it does not clobber CC.
    if (Subtarget->hasMiscellaneousExtensions())
      Opcode = SystemZ::RISBGN;
  }

  SDValue Ops[5] = {
    convertTo(DL, MVT::i64, Op0),
    convertTo(DL, MVT::i64, RxSBG[I].Input),
    CurDAG->getTargetConstant(RxSBG[I].Start, DL, MVT::i32),
    CurDAG->getTargetConstant(RxSBG[I].End, DL, MVT::i32),
    CurDAG->getTargetConstant(RxSBG[I].Rotate, DL, MVT::i32)
  };
  SDValue New = convertTo(
      DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, MVT::i64, Ops), 0));
  ReplaceNode(N, New.getNode());
  return true;
}

void SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
                                              SDValue Op0, uint64_t UpperVal,
                                              uint64_t LowerVal) {
  EVT VT = Node->getValueType(0);
  SDLoc DL(Node);
  SDValue Upper = CurDAG->getConstant(UpperVal, DL, VT);
  if (Op0.getNode())
    Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);

  {
    // When we haven't passed in Op0, Upper will be a constant. In order to
    // prevent folding back to the large immediate in `Or = getNode(...)` we run
    // SelectCode first and end up with an opaque machine node. This means that
    // we need to use a handle to keep track of Upper in case it gets CSE'd by
    // SelectCode.
    //
    // Note that in the case where Op0 is passed in we could just call
    // SelectCode(Upper) later, along with the SelectCode(Or), and avoid needing
    // the handle at all, but it's fine to do it here.
    //
    // TODO: This is a pretty hacky way to do this. Can we do something that
    // doesn't require a two paragraph explanation?
    HandleSDNode Handle(Upper);
    SelectCode(Upper.getNode());
    Upper = Handle.getValue();
  }

  SDValue Lower = CurDAG->getConstant(LowerVal, DL, VT);
  SDValue Or = CurDAG->getNode(Opcode, DL, VT, Upper, Lower);

  ReplaceUses(Node, Or.getNode());
  CurDAG->RemoveDeadNode(Node);

  SelectCode(Or.getNode());
}

bool SystemZDAGToDAGISel::tryGather(SDNode *N, unsigned Opcode) {
  SDValue ElemV = N->getOperand(2);
  auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
  if (!ElemN)
    return false;

  unsigned Elem = ElemN->getZExtValue();
  EVT VT = N->getValueType(0);
  if (Elem >= VT.getVectorNumElements())
    return false;

  auto *Load = dyn_cast<LoadSDNode>(N->getOperand(1));
  if (!Load || !Load->hasOneUse())
    return false;
  if (Load->getMemoryVT().getSizeInBits() !=
      Load->getValueType(0).getSizeInBits())
    return false;

  SDValue Base, Disp, Index;
  if (!selectBDVAddr12Only(Load->getBasePtr(), ElemV, Base, Disp, Index) ||
      Index.getValueType() != VT.changeVectorElementTypeToInteger())
    return false;

  SDLoc DL(Load);
  SDValue Ops[] = {
    N->getOperand(0), Base, Disp, Index,
    CurDAG->getTargetConstant(Elem, DL, MVT::i32), Load->getChain()
  };
  SDNode *Res = CurDAG->getMachineNode(Opcode, DL, VT, MVT::Other, Ops);
  ReplaceUses(SDValue(Load, 1), SDValue(Res, 1));
  ReplaceNode(N, Res);
  return true;
}
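// Illustrative sketch (added commentary, not from the original source) of the
// kind of DAG tryGather above matches, with extensions and operand details
// simplified:
//
//   (insert_vector_elt %vec,
//       (load (add %base, (extract_vector_elt %indexvec, Elem))), Elem)
//
// which VGEF/VGEG perform in a single instruction; tryScatter below handles
// the mirrored extract-and-store pattern for VSCEF/VSCEG.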
bool SystemZDAGToDAGISel::tryScatter(StoreSDNode *Store, unsigned Opcode) {
  SDValue Value = Store->getValue();
  if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return false;
  if (Store->getMemoryVT().getSizeInBits() != Value.getValueSizeInBits())
    return false;

  SDValue ElemV = Value.getOperand(1);
  auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
  if (!ElemN)
    return false;

  SDValue Vec = Value.getOperand(0);
  EVT VT = Vec.getValueType();
  unsigned Elem = ElemN->getZExtValue();
  if (Elem >= VT.getVectorNumElements())
    return false;

  SDValue Base, Disp, Index;
  if (!selectBDVAddr12Only(Store->getBasePtr(), ElemV, Base, Disp, Index) ||
      Index.getValueType() != VT.changeVectorElementTypeToInteger())
    return false;

  SDLoc DL(Store);
  SDValue Ops[] = {
    Vec, Base, Disp, Index, CurDAG->getTargetConstant(Elem, DL, MVT::i32),
    Store->getChain()
  };
  ReplaceNode(Store, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
  return true;
}

bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
                                               LoadSDNode *Load) const {
  // Check that the two memory operands have the same size.
  if (Load->getMemoryVT() != Store->getMemoryVT())
    return false;

  // Volatility stops an access from being decomposed.
  if (Load->isVolatile() || Store->isVolatile())
    return false;

  // There's no chance of overlap if the load is invariant.
  if (Load->isInvariant() && Load->isDereferenceable())
    return true;

  // Otherwise we need to check whether there's an alias.
  const Value *V1 = Load->getMemOperand()->getValue();
  const Value *V2 = Store->getMemOperand()->getValue();
  if (!V1 || !V2)
    return false;

  // Reject equality.
  uint64_t Size = Load->getMemoryVT().getStoreSize();
  int64_t End1 = Load->getSrcValueOffset() + Size;
  int64_t End2 = Store->getSrcValueOffset() + Size;
  if (V1 == V2 && End1 == End2)
    return false;

  return !AA->alias(MemoryLocation(V1, End1, Load->getAAInfo()),
                    MemoryLocation(V2, End2, Store->getAAInfo()));
}

bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
  auto *Store = cast<StoreSDNode>(N);
  auto *Load = cast<LoadSDNode>(Store->getValue());

  // Prefer not to use MVC if either address can use ... RELATIVE LONG
  // instructions.
  uint64_t Size = Load->getMemoryVT().getStoreSize();
  if (Size > 1 && Size <= 8) {
    // Prefer LHRL, LRL and LGRL.
    if (SystemZISD::isPCREL(Load->getBasePtr().getOpcode()))
      return false;
    // Prefer STHRL, STRL and STGRL.
    if (SystemZISD::isPCREL(Store->getBasePtr().getOpcode()))
      return false;
  }

  return canUseBlockOperation(Store, Load);
}

bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
                                                     unsigned I) const {
  auto *StoreA = cast<StoreSDNode>(N);
  auto *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
  auto *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
  return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
}

void SystemZDAGToDAGISel::Select(SDNode *Node) {
  // Dump information about the Node being selected
  DEBUG(errs() << "Selecting: "; Node->dump(CurDAG); errs() << "\n");

  // If we have a custom node, we already have selected!
  if (Node->isMachineOpcode()) {
    DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  unsigned Opcode = Node->getOpcode();
  switch (Opcode) {
  case ISD::OR:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::ROSBG))
        return;
    goto or_xor;

  case ISD::XOR:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::RXSBG))
        return;
    // Fall through.
  or_xor:
    // If this is a 64-bit operation in which both 32-bit halves are nonzero,
    // split the operation into two. If both operands here happen to be
    // constant, leave this to common code to optimize.
    if (Node->getValueType(0) == MVT::i64 &&
        Node->getOperand(0).getOpcode() != ISD::Constant)
      if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
        uint64_t Val = Op1->getZExtValue();
        if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val)) {
          splitLargeImmediate(Opcode, Node, Node->getOperand(0),
                              Val - uint32_t(Val), uint32_t(Val));
          return;
        }
      }
    break;

  case ISD::AND:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::RNSBG))
        return;
    LLVM_FALLTHROUGH;
  case ISD::ROTL:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::ZERO_EXTEND:
    if (tryRISBGZero(Node))
      return;
    break;

  case ISD::Constant:
    // If this is a 64-bit constant that is out of the range of LLILF,
    // LLIHF and LGFI, split it into two 32-bit pieces.
    if (Node->getValueType(0) == MVT::i64) {
      uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
      if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
        splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
                            uint32_t(Val));
        return;
      }
    }
    break;

  case SystemZISD::SELECT_CCMASK: {
    SDValue Op0 = Node->getOperand(0);
    SDValue Op1 = Node->getOperand(1);
    // Prefer to put any load first, so that it can be matched as a
    // conditional load. Likewise for constants in range for LOCHI.
    if ((Op1.getOpcode() == ISD::LOAD && Op0.getOpcode() != ISD::LOAD) ||
        (Subtarget->hasLoadStoreOnCond2() &&
         Node->getValueType(0).isInteger() &&
         Op1.getOpcode() == ISD::Constant &&
         isInt<16>(cast<ConstantSDNode>(Op1)->getSExtValue()) &&
         !(Op0.getOpcode() == ISD::Constant &&
           isInt<16>(cast<ConstantSDNode>(Op0)->getSExtValue())))) {
      SDValue CCValid = Node->getOperand(2);
      SDValue CCMask = Node->getOperand(3);
      uint64_t ConstCCValid =
          cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
      uint64_t ConstCCMask =
          cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
      // Invert the condition.
      CCMask = CurDAG->getConstant(ConstCCValid ^ ConstCCMask, SDLoc(Node),
                                   CCMask.getValueType());
      SDValue Op4 = Node->getOperand(4);
      Node = CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4);
    }
    break;
  }

  case ISD::INSERT_VECTOR_ELT: {
    EVT VT = Node->getValueType(0);
    unsigned ElemBitSize = VT.getScalarSizeInBits();
    if (ElemBitSize == 32) {
      if (tryGather(Node, SystemZ::VGEF))
        return;
    } else if (ElemBitSize == 64) {
      if (tryGather(Node, SystemZ::VGEG))
        return;
    }
    break;
  }

  case ISD::STORE: {
    auto *Store = cast<StoreSDNode>(Node);
    unsigned ElemBitSize = Store->getValue().getValueSizeInBits();
    if (ElemBitSize == 32) {
      if (tryScatter(Store, SystemZ::VSCEF))
        return;
    } else if (ElemBitSize == 64) {
      if (tryScatter(Store, SystemZ::VSCEG))
        return;
    }
    break;
  }
  }

  SelectCode(Node);
}

bool SystemZDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op,
                             unsigned ConstraintID,
                             std::vector<SDValue> &OutOps) {
  SystemZAddressingMode::AddrForm Form;
  SystemZAddressingMode::DispRange DispRange;
  SDValue Base, Disp, Index;

  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_Q:
    // Accept an address with a short displacement, but no index.
    Form = SystemZAddressingMode::FormBD;
    DispRange = SystemZAddressingMode::Disp12Only;
    break;
  case InlineAsm::Constraint_R:
    // Accept an address with a short displacement and an index.
    Form = SystemZAddressingMode::FormBDXNormal;
    DispRange = SystemZAddressingMode::Disp12Only;
    break;
  case InlineAsm::Constraint_S:
    // Accept an address with a long displacement, but no index.
    Form = SystemZAddressingMode::FormBD;
    DispRange = SystemZAddressingMode::Disp20Only;
    break;
  case InlineAsm::Constraint_T:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_o:
    // Accept an address with a long displacement and an index.
    // m works the same as T, as this is the most general case.
    // We don't really have any special handling of "offsettable"
    // memory addresses, so just treat o the same as m.
    Form = SystemZAddressingMode::FormBDXNormal;
    DispRange = SystemZAddressingMode::Disp20Only;
    break;
  }

  if (selectBDXAddr(Form, DispRange, Op, Base, Disp, Index)) {
    const TargetRegisterClass *TRC =
        Subtarget->getRegisterInfo()->getPointerRegClass(*MF);
    SDLoc DL(Base);
    SDValue RC = CurDAG->getTargetConstant(TRC->getID(), DL, MVT::i32);

    // Make sure that the base address doesn't go into %r0.
    // If it's a TargetFrameIndex or a fixed register, we shouldn't do anything.
    if (Base.getOpcode() != ISD::TargetFrameIndex &&
        Base.getOpcode() != ISD::Register) {
      Base =
          SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                         DL, Base.getValueType(),
                                         Base, RC), 0);
    }

    // Make sure that the index register isn't assigned to %r0 either.
    if (Index.getOpcode() != ISD::Register) {
      Index =
          SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                         DL, Index.getValueType(),
                                         Index, RC), 0);
    }

    OutOps.push_back(Base);
    OutOps.push_back(Disp);
    OutOps.push_back(Index);
    return false;
  }

  return true;
}