//===-- SystemZISelDAGToDAG.cpp - A dag to dag inst selector for SystemZ --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the SystemZ target.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetMachine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "systemz-isel"

namespace {
// Used to build addressing modes.
struct SystemZAddressingMode {
  // The shape of the address.
  enum AddrForm {
    // base+displacement
    FormBD,

    // base+displacement+index for load and store operands
    FormBDXNormal,

    // base+displacement+index for load address operands
    FormBDXLA,

    // base+displacement+index+ADJDYNALLOC
    FormBDXDynAlloc
  };
  AddrForm Form;

  // The type of displacement. The enum names here correspond directly
  // to the definitions in SystemZOperand.td. We could split them into
  // flags -- single/pair, 128-bit, etc. -- but it hardly seems worth it.
  enum DispRange {
    Disp12Only,
    Disp12Pair,
    Disp20Only,
    Disp20Only128,
    Disp20Pair
  };
  DispRange DR;

  // The parts of the address. The address is equivalent to:
  //
  //   Base + Disp + Index + (IncludesDynAlloc ? ADJDYNALLOC : 0)
  SDValue Base;
  int64_t Disp;
  SDValue Index;
  bool IncludesDynAlloc;

  SystemZAddressingMode(AddrForm form, DispRange dr)
    : Form(form), DR(dr), Base(), Disp(0), Index(),
      IncludesDynAlloc(false) {}

  // True if the address can have an index register.
  bool hasIndexField() { return Form != FormBD; }

  // True if the address can (and must) include ADJDYNALLOC.
  bool isDynAlloc() { return Form == FormBDXDynAlloc; }

  void dump() {
    errs() << "SystemZAddressingMode " << this << '\n';

    errs() << " Base ";
    if (Base.getNode())
      Base.getNode()->dump();
    else
      errs() << "null\n";

    if (hasIndexField()) {
      errs() << " Index ";
      if (Index.getNode())
        Index.getNode()->dump();
      else
        errs() << "null\n";
    }

    errs() << " Disp " << Disp;
    if (IncludesDynAlloc)
      errs() << " + ADJDYNALLOC";
    errs() << '\n';
  }
};

// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
  assert(Count <= 64);
  if (Count > 63)
    return UINT64_MAX;
  return (uint64_t(1) << Count) - 1;
}

// Represents operands 2 to 5 of the ROTATE AND ... SELECTED BITS operation
// given by Opcode. The operands are: Input (R2), Start (I3), End (I4) and
// Rotate (I5). The combined operand value is effectively:
//
//   (or (rotl Input, Rotate), ~Mask)
//
// for RNSBG and:
//
//   (and (rotl Input, Rotate), Mask)
//
// otherwise. The output value has BitSize bits, although Input may be
// narrower (in which case the upper bits are don't care), or wider (in which
// case the result will be truncated as part of the operation).
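//
// For example, with BitSize == 64, an (and (rotl X, 4), 0xff00) computation
// could be described by Input == X, Rotate == 4 and Mask == 0xff00, which
// corresponds to Start == 48 and End == 55 in the instruction's big-endian
// bit numbering (bit 0 being the most significant bit).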
struct RxSBGOperands {
  RxSBGOperands(unsigned Op, SDValue N)
    : Opcode(Op), BitSize(N.getValueSizeInBits()),
      Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63),
      Rotate(0) {}

  unsigned Opcode;
  unsigned BitSize;
  uint64_t Mask;
  SDValue Input;
  unsigned Start;
  unsigned End;
  unsigned Rotate;
};

class SystemZDAGToDAGISel : public SelectionDAGISel {
  const SystemZSubtarget *Subtarget;

  // Used by SystemZOperands.td to create integer constants.
  inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
    return CurDAG->getTargetConstant(Imm, SDLoc(Node), Node->getValueType(0));
  }

  const SystemZTargetMachine &getTargetMachine() const {
    return static_cast<const SystemZTargetMachine &>(TM);
  }

  const SystemZInstrInfo *getInstrInfo() const {
    return Subtarget->getInstrInfo();
  }

  // Try to fold more of the base or index of AM into AM, where IsBase
  // selects between the base and index.
  bool expandAddress(SystemZAddressingMode &AM, bool IsBase) const;

  // Try to describe N in AM, returning true on success.
  bool selectAddress(SDValue N, SystemZAddressingMode &AM) const;

  // Extract individual target operands from matched address AM.
  void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
                          SDValue &Base, SDValue &Disp) const;
  void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
                          SDValue &Base, SDValue &Disp, SDValue &Index) const;

  // Try to match Addr as a FormBD address with displacement type DR.
  // Return true on success, storing the base and displacement in
  // Base and Disp respectively.
  bool selectBDAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
                    SDValue &Base, SDValue &Disp) const;

  // Try to match Addr as a FormBDX address with displacement type DR.
  // Return true on success and if the result had no index. Store the
  // base and displacement in Base and Disp respectively.
  bool selectMVIAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
                     SDValue &Base, SDValue &Disp) const;

  // Try to match Addr as a FormBDX* address of form Form with
  // displacement type DR. Return true on success, storing the base,
  // displacement and index in Base, Disp and Index respectively.
  bool selectBDXAddr(SystemZAddressingMode::AddrForm Form,
                     SystemZAddressingMode::DispRange DR, SDValue Addr,
                     SDValue &Base, SDValue &Disp, SDValue &Index) const;

  // PC-relative address matching routines used by SystemZOperands.td.
  bool selectPCRelAddress(SDValue Addr, SDValue &Target) const {
    if (SystemZISD::isPCREL(Addr.getOpcode())) {
      Target = Addr.getOperand(0);
      return true;
    }
    return false;
  }

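  // In the matching routines below, "12" forms accept a 12-bit unsigned
  // displacement and "20" forms accept a 20-bit signed displacement; the
  // "Pair" forms are for instructions that come with a partner covering the
  // other displacement range (see selectDisp() and isValidDisp() below).
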
  // BD matching routines used by SystemZOperands.td.
  bool selectBDAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp12Only, Addr, Base, Disp);
  }
  bool selectBDAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
  }
  bool selectBDAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp20Only, Addr, Base, Disp);
  }
  bool selectBDAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
  }

  // MVI matching routines used by SystemZOperands.td.
  bool selectMVIAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectMVIAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
  }
  bool selectMVIAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectMVIAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
  }

  // BDX matching routines used by SystemZOperands.td.
  bool selectBDXAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp12Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp12Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectDynAlloc12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                            SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXDynAlloc,
                         SystemZAddressingMode::Disp12Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Only128(SDValue Addr, SDValue &Base, SDValue &Disp,
                              SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Only128,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectLAAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                          SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
                         SystemZAddressingMode::Disp12Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectLAAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                          SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
                         SystemZAddressingMode::Disp20Pair,
                         Addr, Base, Disp, Index);
  }

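  // The FormBDXLA matchers above only succeed if shouldUseLA() later decides
  // that an LA/LAY is actually profitable for the address; see selectAddress().
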
  // Try to match Addr as an address with a base, 12-bit displacement
  // and index, where the index is element Elem of a vector.
  // Return true on success, storing the base, displacement and vector
  // in Base, Disp and Index respectively.
  bool selectBDVAddr12Only(SDValue Addr, SDValue Elem, SDValue &Base,
                           SDValue &Disp, SDValue &Index) const;

  // Check whether (or Op (and X InsertMask)) is effectively an insertion
  // of X into bits InsertMask of some Y != Op. Return true if so and
  // set Op to that Y.
  bool detectOrAndInsertion(SDValue &Op, uint64_t InsertMask) const;

  // Try to update RxSBG so that only the bits of RxSBG.Input in Mask are used.
  // Return true on success.
  bool refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask) const;

  // Try to fold some of RxSBG.Input into other fields of RxSBG.
  // Return true on success.
  bool expandRxSBG(RxSBGOperands &RxSBG) const;

  // Return an undefined value of type VT.
  SDValue getUNDEF(const SDLoc &DL, EVT VT) const;

  // Convert N to VT, if it isn't already.
  SDValue convertTo(const SDLoc &DL, EVT VT, SDValue N) const;

  // Try to implement AND or shift node N using RISBG with the zero flag set.
  // Return true on success.
  bool tryRISBGZero(SDNode *N);

  // Try to use RISBG or Opcode to implement OR or XOR node N.
  // Return true on success.
  bool tryRxSBG(SDNode *N, unsigned Opcode);

  // If Op0 is null, then Node is a constant that can be loaded using:
  //
  //   (Opcode UpperVal LowerVal)
  //
  // If Op0 is nonnull, then Node can be implemented using:
  //
  //   (Opcode (Opcode Op0 UpperVal) LowerVal)
  void splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
                           uint64_t UpperVal, uint64_t LowerVal);

  // Try to use gather instruction Opcode to implement vector insertion N.
  bool tryGather(SDNode *N, unsigned Opcode);

  // Try to use scatter instruction Opcode to implement store Store.
  bool tryScatter(StoreSDNode *Store, unsigned Opcode);

  // Return true if Load and Store are loads and stores of the same size
  // and are guaranteed not to overlap. Such operations can be implemented
  // using block (SS-format) instructions.
  //
  // Partial overlap would lead to incorrect code, since the block operations
  // are logically bytewise, even though they have a fast path for the
  // non-overlapping case. We also need to avoid full overlap (i.e. two
  // addresses that might be equal at run time) because although that case
  // would be handled correctly, it might be implemented by millicode.
  bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load) const;

  // N is a (store (load Y), X) pattern. Return true if it can use an MVC
  // from Y to X.
  bool storeLoadCanUseMVC(SDNode *N) const;

  // N is a (store (op (load A[0]), (load A[1])), X) pattern. Return true
  // if A[1 - I] == X and if N can use a block operation like NC from A[I]
  // to X.
  bool storeLoadCanUseBlockBinary(SDNode *N, unsigned I) const;

  // Try to expand a boolean SELECT_CCMASK using an IPM sequence.
  SDValue expandSelectBoolean(SDNode *Node);

public:
  SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(TM, OptLevel) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    Subtarget = &MF.getSubtarget<SystemZSubtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  // Override MachineFunctionPass.
  StringRef getPassName() const override {
    return "SystemZ DAG->DAG Pattern Instruction Selection";
  }

  // Override SelectionDAGISel.
  void Select(SDNode *Node) override;
  bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;
  void PreprocessISelDAG() override;

  // Include the pieces autogenerated from the target description.
#include "SystemZGenDAGISel.inc"
};
} // end anonymous namespace

FunctionPass *llvm::createSystemZISelDag(SystemZTargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new SystemZDAGToDAGISel(TM, OptLevel);
}

// Return true if Val should be selected as a displacement for an address
// with range DR. Here we're interested in the range of both the instruction
// described by DR and of any pairing instruction.
static bool selectDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
  switch (DR) {
  case SystemZAddressingMode::Disp12Only:
    return isUInt<12>(Val);

  case SystemZAddressingMode::Disp12Pair:
  case SystemZAddressingMode::Disp20Only:
  case SystemZAddressingMode::Disp20Pair:
    return isInt<20>(Val);

  case SystemZAddressingMode::Disp20Only128:
    return isInt<20>(Val) && isInt<20>(Val + 8);
  }
  llvm_unreachable("Unhandled displacement range");
}

// Change the base or index in AM to Value, where IsBase selects
// between the base and index.
static void changeComponent(SystemZAddressingMode &AM, bool IsBase,
                            SDValue Value) {
  if (IsBase)
    AM.Base = Value;
  else
    AM.Index = Value;
}

// The base or index of AM is equivalent to Value + ADJDYNALLOC,
// where IsBase selects between the base and index. Try to fold the
// ADJDYNALLOC into AM.
static bool expandAdjDynAlloc(SystemZAddressingMode &AM, bool IsBase,
                              SDValue Value) {
  if (AM.isDynAlloc() && !AM.IncludesDynAlloc) {
    changeComponent(AM, IsBase, Value);
    AM.IncludesDynAlloc = true;
    return true;
  }
  return false;
}

// The base of AM is equivalent to Base + Index. Try to use Index as
// the index register.
static bool expandIndex(SystemZAddressingMode &AM, SDValue Base,
                        SDValue Index) {
  if (AM.hasIndexField() && !AM.Index.getNode()) {
    AM.Base = Base;
    AM.Index = Index;
    return true;
  }
  return false;
}

// The base or index of AM is equivalent to Op0 + Op1, where IsBase selects
// between the base and index. Try to fold Op1 into AM's displacement.
static bool expandDisp(SystemZAddressingMode &AM, bool IsBase,
                       SDValue Op0, uint64_t Op1) {
  // First try adjusting the displacement.
  int64_t TestDisp = AM.Disp + Op1;
  if (selectDisp(AM.DR, TestDisp)) {
    changeComponent(AM, IsBase, Op0);
    AM.Disp = TestDisp;
    return true;
  }

  // We could consider forcing the displacement into a register and
  // using it as an index, but it would need to be carefully tuned.
  return false;
}

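// expandAddress() peels one level of addition (or ADJDYNALLOC/PCREL_OFFSET)
// off the base or index per call. For example, starting from
// Base == (add (add %X, %Y), 4) in a FormBDX mode, repeated calls from
// selectAddress() leave Base == %X, Index == %Y and Disp == 4.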
bool SystemZDAGToDAGISel::expandAddress(SystemZAddressingMode &AM,
                                        bool IsBase) const {
  SDValue N = IsBase ? AM.Base : AM.Index;
  unsigned Opcode = N.getOpcode();
  if (Opcode == ISD::TRUNCATE) {
    N = N.getOperand(0);
    Opcode = N.getOpcode();
  }
  if (Opcode == ISD::ADD || CurDAG->isBaseWithConstantOffset(N)) {
    SDValue Op0 = N.getOperand(0);
    SDValue Op1 = N.getOperand(1);

    unsigned Op0Code = Op0->getOpcode();
    unsigned Op1Code = Op1->getOpcode();

    if (Op0Code == SystemZISD::ADJDYNALLOC)
      return expandAdjDynAlloc(AM, IsBase, Op1);
    if (Op1Code == SystemZISD::ADJDYNALLOC)
      return expandAdjDynAlloc(AM, IsBase, Op0);

    if (Op0Code == ISD::Constant)
      return expandDisp(AM, IsBase, Op1,
                        cast<ConstantSDNode>(Op0)->getSExtValue());
    if (Op1Code == ISD::Constant)
      return expandDisp(AM, IsBase, Op0,
                        cast<ConstantSDNode>(Op1)->getSExtValue());

    if (IsBase && expandIndex(AM, Op0, Op1))
      return true;
  }
  if (Opcode == SystemZISD::PCREL_OFFSET) {
    SDValue Full = N.getOperand(0);
    SDValue Base = N.getOperand(1);
    SDValue Anchor = Base.getOperand(0);
    uint64_t Offset = (cast<GlobalAddressSDNode>(Full)->getOffset() -
                       cast<GlobalAddressSDNode>(Anchor)->getOffset());
    return expandDisp(AM, IsBase, Base, Offset);
  }
  return false;
}

// Return true if an instruction with displacement range DR should be
// used for displacement value Val. selectDisp(DR, Val) must already hold.
static bool isValidDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
  assert(selectDisp(DR, Val) && "Invalid displacement");
  switch (DR) {
  case SystemZAddressingMode::Disp12Only:
  case SystemZAddressingMode::Disp20Only:
  case SystemZAddressingMode::Disp20Only128:
    return true;

  case SystemZAddressingMode::Disp12Pair:
    // Use the other instruction if the displacement is too large.
    return isUInt<12>(Val);

  case SystemZAddressingMode::Disp20Pair:
    // Use the other instruction if the displacement is small enough.
    return !isUInt<12>(Val);
  }
  llvm_unreachable("Unhandled displacement range");
}

// Return true if Base + Disp + Index should be performed by LA(Y).
static bool shouldUseLA(SDNode *Base, int64_t Disp, SDNode *Index) {
  // Don't use LA(Y) for constants.
  if (!Base)
    return false;

  // Always use LA(Y) for frame addresses, since we know that the destination
  // register is almost always (perhaps always) going to be different from
  // the frame register.
  if (Base->getOpcode() == ISD::FrameIndex)
    return true;

  if (Disp) {
    // Always use LA(Y) if there is a base, displacement and index.
    if (Index)
      return true;

    // Always use LA if the displacement is small enough. It should always
    // be no worse than AGHI (and better if it avoids a move).
    if (isUInt<12>(Disp))
      return true;

    // For similar reasons, always use LAY if the constant is too big for AGHI.
    // LAY should be no worse than AGFI.
    if (!isInt<16>(Disp))
      return true;
  } else {
    // Don't use LA for plain registers.
    if (!Index)
      return false;

    // Don't use LA for plain addition if the index operand is only used
    // once. It should be a natural two-operand addition in that case.
    if (Index->hasOneUse())
      return false;

    // Prefer addition if the second operation is sign-extended, in the
    // hope of using AGF.
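    // (AGF and AGFR add a sign-extended 32-bit operand directly, so the
    // separate extension would disappear.)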
    unsigned IndexOpcode = Index->getOpcode();
    if (IndexOpcode == ISD::SIGN_EXTEND ||
        IndexOpcode == ISD::SIGN_EXTEND_INREG)
      return false;
  }

  // Don't use LA for two-operand addition if either operand is only
  // used once. The addition instructions are better in that case.
  if (Base->hasOneUse())
    return false;

  return true;
}

// Return true if Addr is suitable for AM, updating AM if so.
bool SystemZDAGToDAGISel::selectAddress(SDValue Addr,
                                        SystemZAddressingMode &AM) const {
  // Start out assuming that the address will need to be loaded separately,
  // then try to extend it as much as we can.
  AM.Base = Addr;

  // First try treating the address as a constant.
  if (Addr.getOpcode() == ISD::Constant &&
      expandDisp(AM, true, SDValue(),
                 cast<ConstantSDNode>(Addr)->getSExtValue()))
    ;
  // Also see if it's a bare ADJDYNALLOC.
  else if (Addr.getOpcode() == SystemZISD::ADJDYNALLOC &&
           expandAdjDynAlloc(AM, true, SDValue()))
    ;
  else
    // Otherwise try expanding each component.
    while (expandAddress(AM, true) ||
           (AM.Index.getNode() && expandAddress(AM, false)))
      continue;

  // Reject cases where it isn't profitable to use LA(Y).
  if (AM.Form == SystemZAddressingMode::FormBDXLA &&
      !shouldUseLA(AM.Base.getNode(), AM.Disp, AM.Index.getNode()))
    return false;

  // Reject cases where the other instruction in a pair should be used.
  if (!isValidDisp(AM.DR, AM.Disp))
    return false;

  // Make sure that ADJDYNALLOC is included where necessary.
  if (AM.isDynAlloc() && !AM.IncludesDynAlloc)
    return false;

  DEBUG(AM.dump());
  return true;
}

// Insert a node into the DAG at least before Pos. This will reposition
// the node as needed, and will assign it a node ID that is <= Pos's ID.
// Note that this does *not* preserve the uniqueness of node IDs!
// The selection DAG must no longer depend on their uniqueness when this
// function is used.
static void insertDAGNode(SelectionDAG *DAG, SDNode *Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos->getNodeId()) {
    DAG->RepositionNode(Pos->getIterator(), N.getNode());
    N.getNode()->setNodeId(Pos->getNodeId());
  }
}

void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
                                             EVT VT, SDValue &Base,
                                             SDValue &Disp) const {
  Base = AM.Base;
  if (!Base.getNode())
    // Register 0 means "no base". This is mostly useful for shifts.
    Base = CurDAG->getRegister(0, VT);
  else if (Base.getOpcode() == ISD::FrameIndex) {
    // Lower a FrameIndex to a TargetFrameIndex.
    int64_t FrameIndex = cast<FrameIndexSDNode>(Base)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FrameIndex, VT);
  } else if (Base.getValueType() != VT) {
    // Truncate values from i64 to i32, for shifts.
    assert(VT == MVT::i32 && Base.getValueType() == MVT::i64 &&
           "Unexpected truncation");
    SDLoc DL(Base);
    SDValue Trunc = CurDAG->getNode(ISD::TRUNCATE, DL, VT, Base);
    insertDAGNode(CurDAG, Base.getNode(), Trunc);
    Base = Trunc;
  }

  // Lower the displacement to a TargetConstant.
  Disp = CurDAG->getTargetConstant(AM.Disp, SDLoc(Base), VT);
}

void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
                                             EVT VT, SDValue &Base,
                                             SDValue &Disp,
                                             SDValue &Index) const {
  getAddressOperands(AM, VT, Base, Disp);

  Index = AM.Index;
  if (!Index.getNode())
    // Register 0 means "no index".
    Index = CurDAG->getRegister(0, VT);
}

bool SystemZDAGToDAGISel::selectBDAddr(SystemZAddressingMode::DispRange DR,
                                       SDValue Addr, SDValue &Base,
                                       SDValue &Disp) const {
  SystemZAddressingMode AM(SystemZAddressingMode::FormBD, DR);
  if (!selectAddress(Addr, AM))
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp);
  return true;
}

bool SystemZDAGToDAGISel::selectMVIAddr(SystemZAddressingMode::DispRange DR,
                                        SDValue Addr, SDValue &Base,
                                        SDValue &Disp) const {
  SystemZAddressingMode AM(SystemZAddressingMode::FormBDXNormal, DR);
  if (!selectAddress(Addr, AM) || AM.Index.getNode())
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp);
  return true;
}

bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form,
                                        SystemZAddressingMode::DispRange DR,
                                        SDValue Addr, SDValue &Base,
                                        SDValue &Disp, SDValue &Index) const {
  SystemZAddressingMode AM(Form, DR);
  if (!selectAddress(Addr, AM))
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp, Index);
  return true;
}

bool SystemZDAGToDAGISel::selectBDVAddr12Only(SDValue Addr, SDValue Elem,
                                              SDValue &Base,
                                              SDValue &Disp,
                                              SDValue &Index) const {
  SDValue Regs[2];
  if (selectBDXAddr12Only(Addr, Regs[0], Disp, Regs[1]) &&
      Regs[0].getNode() && Regs[1].getNode()) {
    for (unsigned int I = 0; I < 2; ++I) {
      Base = Regs[I];
      Index = Regs[1 - I];
      // We can't tell here whether the index vector has the right type
      // for the access; the caller needs to do that instead.
      if (Index.getOpcode() == ISD::ZERO_EXTEND)
        Index = Index.getOperand(0);
      if (Index.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          Index.getOperand(1) == Elem) {
        Index = Index.getOperand(0);
        return true;
      }
    }
  }
  return false;
}

bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
                                               uint64_t InsertMask) const {
  // We're only interested in cases where the insertion is into some operand
  // of Op, rather than into Op itself. The only useful case is an AND.
  if (Op.getOpcode() != ISD::AND)
    return false;

  // We need a constant mask.
  auto *MaskNode = dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
  if (!MaskNode)
    return false;

  // It's not an insertion of Op.getOperand(0) if the two masks overlap.
  uint64_t AndMask = MaskNode->getZExtValue();
  if (InsertMask & AndMask)
    return false;

  // It's only an insertion if all bits are covered or are known to be zero.
  // The inner check covers all cases but is more expensive.
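  // For example, with a 32-bit Op, InsertMask == 0xff and AndMask == 0xffffff00
  // together cover every bit, so (or Op X) inserts the low byte of X into
  // Op.getOperand(0).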
  uint64_t Used = allOnes(Op.getValueSizeInBits());
  if (Used != (AndMask | InsertMask)) {
    KnownBits Known;
    CurDAG->computeKnownBits(Op.getOperand(0), Known);
    if (Used != (AndMask | InsertMask | Known.Zero.getZExtValue()))
      return false;
  }

  Op = Op.getOperand(0);
  return true;
}

bool SystemZDAGToDAGISel::refineRxSBGMask(RxSBGOperands &RxSBG,
                                          uint64_t Mask) const {
  const SystemZInstrInfo *TII = getInstrInfo();
  if (RxSBG.Rotate != 0)
    Mask = (Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate));
  Mask &= RxSBG.Mask;
  if (TII->isRxSBGMask(Mask, RxSBG.BitSize, RxSBG.Start, RxSBG.End)) {
    RxSBG.Mask = Mask;
    return true;
  }
  return false;
}

// Return true if any bits of (RxSBG.Input & Mask) are significant.
static bool maskMatters(RxSBGOperands &RxSBG, uint64_t Mask) {
  // Rotate the mask in the same way as RxSBG.Input is rotated.
  if (RxSBG.Rotate != 0)
    Mask = ((Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)));
  return (Mask & RxSBG.Mask) != 0;
}

bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
  SDValue N = RxSBG.Input;
  unsigned Opcode = N.getOpcode();
  switch (Opcode) {
  case ISD::TRUNCATE: {
    if (RxSBG.Opcode == SystemZ::RNSBG)
      return false;
    uint64_t BitSize = N.getValueSizeInBits();
    uint64_t Mask = allOnes(BitSize);
    if (!refineRxSBGMask(RxSBG, Mask))
      return false;
    RxSBG.Input = N.getOperand(0);
    return true;
  }
  case ISD::AND: {
    if (RxSBG.Opcode == SystemZ::RNSBG)
      return false;

    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!MaskNode)
      return false;

    SDValue Input = N.getOperand(0);
    uint64_t Mask = MaskNode->getZExtValue();
    if (!refineRxSBGMask(RxSBG, Mask)) {
      // If some bits of Input are already known zeros, those bits will have
      // been removed from the mask. See if adding them back in makes the
      // mask suitable.
      KnownBits Known;
      CurDAG->computeKnownBits(Input, Known);
      Mask |= Known.Zero.getZExtValue();
      if (!refineRxSBGMask(RxSBG, Mask))
        return false;
    }
    RxSBG.Input = Input;
    return true;
  }

  case ISD::OR: {
    if (RxSBG.Opcode != SystemZ::RNSBG)
      return false;

    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!MaskNode)
      return false;

    SDValue Input = N.getOperand(0);
    uint64_t Mask = ~MaskNode->getZExtValue();
    if (!refineRxSBGMask(RxSBG, Mask)) {
      // If some bits of Input are already known ones, those bits will have
      // been removed from the mask. See if adding them back in makes the
      // mask suitable.
      KnownBits Known;
      CurDAG->computeKnownBits(Input, Known);
      Mask &= ~Known.One.getZExtValue();
      if (!refineRxSBGMask(RxSBG, Mask))
        return false;
    }
    RxSBG.Input = Input;
    return true;
  }

  case ISD::ROTL: {
    // Any 64-bit rotate left can be merged into the RxSBG.
    if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
      return false;
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    RxSBG.Rotate = (RxSBG.Rotate + CountNode->getZExtValue()) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::ANY_EXTEND:
    // Bits above the extended operand are don't-care.
    RxSBG.Input = N.getOperand(0);
    return true;

  case ISD::ZERO_EXTEND:
    if (RxSBG.Opcode != SystemZ::RNSBG) {
      // Restrict the mask to the extended operand.
      unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
      if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))
        return false;

      RxSBG.Input = N.getOperand(0);
      return true;
    }
    LLVM_FALLTHROUGH;

  case ISD::SIGN_EXTEND: {
    // Check that the extension bits are don't-care (i.e. are masked out
    // by the final mask).
    unsigned BitSize = N.getValueSizeInBits();
    unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
    if (maskMatters(RxSBG, allOnes(BitSize) - allOnes(InnerBitSize))) {
      // In the case where only the sign bit is active, increase Rotate with
      // the extension width.
      if (RxSBG.Mask == 1 && RxSBG.Rotate == 1)
        RxSBG.Rotate += (BitSize - InnerBitSize);
      else
        return false;
    }

    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::SHL: {
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    uint64_t Count = CountNode->getZExtValue();
    unsigned BitSize = N.getValueSizeInBits();
    if (Count < 1 || Count >= BitSize)
      return false;

    if (RxSBG.Opcode == SystemZ::RNSBG) {
      // Treat (shl X, count) as (rotl X, count) as long as the bottom
      // count bits from RxSBG.Input are ignored.
      if (maskMatters(RxSBG, allOnes(Count)))
        return false;
    } else {
      // Treat (shl X, count) as (and (rotl X, count), ~0<<count).
      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count) << Count))
        return false;
    }

    RxSBG.Rotate = (RxSBG.Rotate + Count) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::SRL:
  case ISD::SRA: {
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    uint64_t Count = CountNode->getZExtValue();
    unsigned BitSize = N.getValueSizeInBits();
    if (Count < 1 || Count >= BitSize)
      return false;

    if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) {
      // Treat (srl|sra X, count) as (rotl X, size-count) as long as the top
      // count bits from RxSBG.Input are ignored.
      if (maskMatters(RxSBG, allOnes(Count) << (BitSize - Count)))
        return false;
    } else {
      // Treat (srl X, count) as (and (rotl X, size-count), ~0>>count),
      // which is similar to SLL above.
      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count)))
        return false;
    }

    RxSBG.Rotate = (RxSBG.Rotate - Count) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }
  default:
    return false;
  }
}

SDValue SystemZDAGToDAGISel::getUNDEF(const SDLoc &DL, EVT VT) const {
  SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
  return SDValue(N, 0);
}

SDValue SystemZDAGToDAGISel::convertTo(const SDLoc &DL, EVT VT,
                                       SDValue N) const {
  if (N.getValueType() == MVT::i32 && VT == MVT::i64)
    return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32,
                                         DL, VT, getUNDEF(DL, MVT::i64), N);
  if (N.getValueType() == MVT::i64 && VT == MVT::i32)
    return CurDAG->getTargetExtractSubreg(SystemZ::subreg_l32, DL, VT, N);
  assert(N.getValueType() == VT && "Unexpected value types");
  return N;
}

bool SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getSizeInBits() > 64)
    return false;
  RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
  unsigned Count = 0;
  while (expandRxSBG(RISBG))
    // The widening or narrowing is expected to be free.
    // Counting widening or narrowing as a saved operation will result in
    // preferring an R*SBG over a simple shift/logical instruction.
    if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND &&
        RISBG.Input.getOpcode() != ISD::TRUNCATE)
      Count += 1;
  if (Count == 0)
    return false;

  // Prefer to use normal shift instructions over RISBG, since they can handle
  // all cases and are sometimes shorter.
  if (Count == 1 && N->getOpcode() != ISD::AND)
    return false;

  // Prefer register extensions like LLC over RISBG. Also prefer to start
  // out with normal ANDs if one instruction would be enough. We can convert
  // these ANDs into an RISBG later if a three-address instruction is useful.
  if (RISBG.Rotate == 0) {
    bool PreferAnd = false;
    // Prefer AND for any 32-bit and-immediate operation.
    if (VT == MVT::i32)
      PreferAnd = true;
    // As well as for any 64-bit operation that can be implemented via LLC(R),
    // LLH(R), LLGT(R), or one of the and-immediate instructions.
    else if (RISBG.Mask == 0xff ||
             RISBG.Mask == 0xffff ||
             RISBG.Mask == 0x7fffffff ||
             SystemZ::isImmLF(~RISBG.Mask) ||
             SystemZ::isImmHF(~RISBG.Mask))
      PreferAnd = true;
    // And likewise for the LLZRGF instruction, which doesn't have a register
    // to register version.
    else if (auto *Load = dyn_cast<LoadSDNode>(RISBG.Input)) {
      if (Load->getMemoryVT() == MVT::i32 &&
          (Load->getExtensionType() == ISD::EXTLOAD ||
           Load->getExtensionType() == ISD::ZEXTLOAD) &&
          RISBG.Mask == 0xffffff00 &&
          Subtarget->hasLoadAndZeroRightmostByte())
        PreferAnd = true;
    }
    if (PreferAnd) {
      // Replace the current node with an AND. Note that the current node
      // might already be that same AND, in which case it is already CSE'd
      // with it, and we must not call ReplaceNode.
      SDValue In = convertTo(DL, VT, RISBG.Input);
      SDValue Mask = CurDAG->getConstant(RISBG.Mask, DL, VT);
      SDValue New = CurDAG->getNode(ISD::AND, DL, VT, In, Mask);
      if (N != New.getNode()) {
        insertDAGNode(CurDAG, N, Mask);
        insertDAGNode(CurDAG, N, New);
        ReplaceNode(N, New.getNode());
        N = New.getNode();
      }
      // Now, select the machine opcode to implement this operation.
      if (!N->isMachineOpcode())
        SelectCode(N);
      return true;
    }
  }

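  // Otherwise emit a real RISBG-family instruction. Its operands are: the
  // value supplying the bits outside the selected range (undef here, since
  // ORing 128 into the end position sets the "zero remaining bits" flag),
  // the rotated input, the start and end bit positions, and the rotate count.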
  unsigned Opcode = SystemZ::RISBG;
  // Prefer RISBGN if available, since it does not clobber CC.
  if (Subtarget->hasMiscellaneousExtensions())
    Opcode = SystemZ::RISBGN;
  EVT OpcodeVT = MVT::i64;
  if (VT == MVT::i32 && Subtarget->hasHighWord() &&
      // We can only use the 32-bit instructions if all source bits are
      // in the low 32 bits without wrapping, both after rotation (because
      // of the smaller range for Start and End) and before rotation
      // (because the input value is truncated).
      RISBG.Start >= 32 && RISBG.End >= RISBG.Start &&
      ((RISBG.Start + RISBG.Rotate) & 63) >= 32 &&
      ((RISBG.End + RISBG.Rotate) & 63) >=
      ((RISBG.Start + RISBG.Rotate) & 63)) {
    Opcode = SystemZ::RISBMux;
    OpcodeVT = MVT::i32;
    RISBG.Start &= 31;
    RISBG.End &= 31;
  }
  SDValue Ops[5] = {
    getUNDEF(DL, OpcodeVT),
    convertTo(DL, OpcodeVT, RISBG.Input),
    CurDAG->getTargetConstant(RISBG.Start, DL, MVT::i32),
    CurDAG->getTargetConstant(RISBG.End | 128, DL, MVT::i32),
    CurDAG->getTargetConstant(RISBG.Rotate, DL, MVT::i32)
  };
  SDValue New = convertTo(
      DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, OpcodeVT, Ops), 0));
  ReplaceUses(N, New.getNode());
  CurDAG->RemoveDeadNode(N);
  return true;
}

bool SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getSizeInBits() > 64)
    return false;
  // Try treating each operand of N as the second operand of the RxSBG
  // and see which goes deepest.
  RxSBGOperands RxSBG[] = {
    RxSBGOperands(Opcode, N->getOperand(0)),
    RxSBGOperands(Opcode, N->getOperand(1))
  };
  unsigned Count[] = { 0, 0 };
  for (unsigned I = 0; I < 2; ++I)
    while (expandRxSBG(RxSBG[I]))
      // The widening or narrowing is expected to be free.
      // Counting widening or narrowing as a saved operation will result in
      // preferring an R*SBG over a simple shift/logical instruction.
      if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND &&
          RxSBG[I].Input.getOpcode() != ISD::TRUNCATE)
        Count[I] += 1;

  // Do nothing if neither operand is suitable.
  if (Count[0] == 0 && Count[1] == 0)
    return false;

  // Pick the deepest second operand.
  unsigned I = Count[0] > Count[1] ? 0 : 1;
  SDValue Op0 = N->getOperand(I ^ 1);

  // Prefer IC for character insertions from memory.
  if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
    if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
      if (Load->getMemoryVT() == MVT::i8)
        return false;

  // See whether we can avoid an AND in the first operand by converting
  // ROSBG to RISBG.
  if (Opcode == SystemZ::ROSBG && detectOrAndInsertion(Op0, RxSBG[I].Mask)) {
    Opcode = SystemZ::RISBG;
    // Prefer RISBGN if available, since it does not clobber CC.
    if (Subtarget->hasMiscellaneousExtensions())
      Opcode = SystemZ::RISBGN;
  }

  SDValue Ops[5] = {
    convertTo(DL, MVT::i64, Op0),
    convertTo(DL, MVT::i64, RxSBG[I].Input),
    CurDAG->getTargetConstant(RxSBG[I].Start, DL, MVT::i32),
    CurDAG->getTargetConstant(RxSBG[I].End, DL, MVT::i32),
    CurDAG->getTargetConstant(RxSBG[I].Rotate, DL, MVT::i32)
  };
  SDValue New = convertTo(
      DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, MVT::i64, Ops), 0));
  ReplaceNode(N, New.getNode());
  return true;
}

void SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
                                              SDValue Op0, uint64_t UpperVal,
                                              uint64_t LowerVal) {
  EVT VT = Node->getValueType(0);
  SDLoc DL(Node);
  SDValue Upper = CurDAG->getConstant(UpperVal, DL, VT);
  if (Op0.getNode())
    Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);

  {
    // When we haven't passed in Op0, Upper will be a constant. In order to
    // prevent folding back to the large immediate in `Or = getNode(...)` we run
    // SelectCode first and end up with an opaque machine node. This means that
    // we need to use a handle to keep track of Upper in case it gets CSE'd by
    // SelectCode.
    //
    // Note that in the case where Op0 is passed in we could just call
    // SelectCode(Upper) later, along with the SelectCode(Or), and avoid needing
    // the handle at all, but it's fine to do it here.
    //
    // TODO: This is a pretty hacky way to do this. Can we do something that
    // doesn't require a two paragraph explanation?
    HandleSDNode Handle(Upper);
    SelectCode(Upper.getNode());
    Upper = Handle.getValue();
  }

  SDValue Lower = CurDAG->getConstant(LowerVal, DL, VT);
  SDValue Or = CurDAG->getNode(Opcode, DL, VT, Upper, Lower);

  ReplaceUses(Node, Or.getNode());
  CurDAG->RemoveDeadNode(Node);

  SelectCode(Or.getNode());
}

bool SystemZDAGToDAGISel::tryGather(SDNode *N, unsigned Opcode) {
  SDValue ElemV = N->getOperand(2);
  auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
  if (!ElemN)
    return false;

  unsigned Elem = ElemN->getZExtValue();
  EVT VT = N->getValueType(0);
  if (Elem >= VT.getVectorNumElements())
    return false;

  auto *Load = dyn_cast<LoadSDNode>(N->getOperand(1));
  if (!Load || !Load->hasOneUse())
    return false;
  if (Load->getMemoryVT().getSizeInBits() !=
      Load->getValueType(0).getSizeInBits())
    return false;

  SDValue Base, Disp, Index;
  if (!selectBDVAddr12Only(Load->getBasePtr(), ElemV, Base, Disp, Index) ||
      Index.getValueType() != VT.changeVectorElementTypeToInteger())
    return false;

  SDLoc DL(Load);
  SDValue Ops[] = {
    N->getOperand(0), Base, Disp, Index,
    CurDAG->getTargetConstant(Elem, DL, MVT::i32), Load->getChain()
  };
  SDNode *Res = CurDAG->getMachineNode(Opcode, DL, VT, MVT::Other, Ops);
  ReplaceUses(SDValue(Load, 1), SDValue(Res, 1));
  ReplaceNode(N, Res);
  return true;
}

bool SystemZDAGToDAGISel::tryScatter(StoreSDNode *Store, unsigned Opcode) {
  SDValue Value = Store->getValue();
  if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return false;
  if (Store->getMemoryVT().getSizeInBits() != Value.getValueSizeInBits())
    return false;

  SDValue ElemV = Value.getOperand(1);
  auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
  if (!ElemN)
    return false;

  SDValue Vec = Value.getOperand(0);
  EVT VT = Vec.getValueType();
  unsigned Elem = ElemN->getZExtValue();
  if (Elem >= VT.getVectorNumElements())
    return false;

  SDValue Base, Disp, Index;
  if (!selectBDVAddr12Only(Store->getBasePtr(), ElemV, Base, Disp, Index) ||
      Index.getValueType() != VT.changeVectorElementTypeToInteger())
    return false;

  SDLoc DL(Store);
  SDValue Ops[] = {
    Vec, Base, Disp, Index, CurDAG->getTargetConstant(Elem, DL, MVT::i32),
    Store->getChain()
  };
  ReplaceNode(Store, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
  return true;
}

bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
                                               LoadSDNode *Load) const {
  // Check that the two memory operands have the same size.
  if (Load->getMemoryVT() != Store->getMemoryVT())
    return false;

  // Volatility stops an access from being decomposed.
  if (Load->isVolatile() || Store->isVolatile())
    return false;

  // There's no chance of overlap if the load is invariant.
  if (Load->isInvariant() && Load->isDereferenceable())
    return true;

  // Otherwise we need to check whether there's an alias.
  const Value *V1 = Load->getMemOperand()->getValue();
  const Value *V2 = Store->getMemOperand()->getValue();
  if (!V1 || !V2)
    return false;

  // Reject equality.
  uint64_t Size = Load->getMemoryVT().getStoreSize();
  int64_t End1 = Load->getSrcValueOffset() + Size;
  int64_t End2 = Store->getSrcValueOffset() + Size;
  if (V1 == V2 && End1 == End2)
    return false;

  return !AA->alias(MemoryLocation(V1, End1, Load->getAAInfo()),
                    MemoryLocation(V2, End2, Store->getAAInfo()));
}

bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
  auto *Store = cast<StoreSDNode>(N);
  auto *Load = cast<LoadSDNode>(Store->getValue());

  // Prefer not to use MVC if either address can use ... RELATIVE LONG
  // instructions.
  uint64_t Size = Load->getMemoryVT().getStoreSize();
  if (Size > 1 && Size <= 8) {
    // Prefer LHRL, LRL and LGRL.
    if (SystemZISD::isPCREL(Load->getBasePtr().getOpcode()))
      return false;
    // Prefer STHRL, STRL and STGRL.
    if (SystemZISD::isPCREL(Store->getBasePtr().getOpcode()))
      return false;
  }

  return canUseBlockOperation(Store, Load);
}

bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
                                                     unsigned I) const {
  auto *StoreA = cast<StoreSDNode>(N);
  auto *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
  auto *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
  return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
}

void SystemZDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we already have selected!
  if (Node->isMachineOpcode()) {
    DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  unsigned Opcode = Node->getOpcode();
  switch (Opcode) {
  case ISD::OR:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::ROSBG))
        return;
    goto or_xor;

  case ISD::XOR:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::RXSBG))
        return;
    // Fall through.
  or_xor:
    // If this is a 64-bit operation in which both 32-bit halves are nonzero,
    // split the operation into two. If both operands here happen to be
    // constant, leave this to common code to optimize.
    if (Node->getValueType(0) == MVT::i64 &&
        Node->getOperand(0).getOpcode() != ISD::Constant)
      if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
        uint64_t Val = Op1->getZExtValue();
        if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val)) {
          splitLargeImmediate(Opcode, Node, Node->getOperand(0),
                              Val - uint32_t(Val), uint32_t(Val));
          return;
        }
      }
    break;

  case ISD::AND:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::RNSBG))
        return;
    LLVM_FALLTHROUGH;
  case ISD::ROTL:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::ZERO_EXTEND:
    if (tryRISBGZero(Node))
      return;
    break;

  case ISD::Constant:
    // If this is a 64-bit constant that is out of the range of LLILF,
    // LLIHF and LGFI, split it into two 32-bit pieces.
    if (Node->getValueType(0) == MVT::i64) {
      uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
      if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
        splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
                            uint32_t(Val));
        return;
      }
    }
    break;

  case SystemZISD::SELECT_CCMASK: {
    SDValue Op0 = Node->getOperand(0);
    SDValue Op1 = Node->getOperand(1);
    // Prefer to put any load first, so that it can be matched as a
    // conditional load. Likewise for constants in range for LOCHI.
    if ((Op1.getOpcode() == ISD::LOAD && Op0.getOpcode() != ISD::LOAD) ||
        (Subtarget->hasLoadStoreOnCond2() &&
         Node->getValueType(0).isInteger() &&
         Op1.getOpcode() == ISD::Constant &&
         isInt<16>(cast<ConstantSDNode>(Op1)->getSExtValue()) &&
         !(Op0.getOpcode() == ISD::Constant &&
           isInt<16>(cast<ConstantSDNode>(Op0)->getSExtValue())))) {
      SDValue CCValid = Node->getOperand(2);
      SDValue CCMask = Node->getOperand(3);
      uint64_t ConstCCValid =
          cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
      uint64_t ConstCCMask =
          cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
      // Invert the condition.
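      // Swapping the operands exchanges the true and false values, so select
      // on the complementary set of CC values (CCValid ^ CCMask) instead.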
      CCMask = CurDAG->getConstant(ConstCCValid ^ ConstCCMask, SDLoc(Node),
                                   CCMask.getValueType());
      SDValue Op4 = Node->getOperand(4);
      Node = CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4);
    }
    break;
  }

  case ISD::INSERT_VECTOR_ELT: {
    EVT VT = Node->getValueType(0);
    unsigned ElemBitSize = VT.getScalarSizeInBits();
    if (ElemBitSize == 32) {
      if (tryGather(Node, SystemZ::VGEF))
        return;
    } else if (ElemBitSize == 64) {
      if (tryGather(Node, SystemZ::VGEG))
        return;
    }
    break;
  }

  case ISD::STORE: {
    auto *Store = cast<StoreSDNode>(Node);
    unsigned ElemBitSize = Store->getValue().getValueSizeInBits();
    if (ElemBitSize == 32) {
      if (tryScatter(Store, SystemZ::VSCEF))
        return;
    } else if (ElemBitSize == 64) {
      if (tryScatter(Store, SystemZ::VSCEG))
        return;
    }
    break;
  }
  }

  SelectCode(Node);
}

bool SystemZDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op,
                             unsigned ConstraintID,
                             std::vector<SDValue> &OutOps) {
  SystemZAddressingMode::AddrForm Form;
  SystemZAddressingMode::DispRange DispRange;
  SDValue Base, Disp, Index;

  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_Q:
    // Accept an address with a short displacement, but no index.
    Form = SystemZAddressingMode::FormBD;
    DispRange = SystemZAddressingMode::Disp12Only;
    break;
  case InlineAsm::Constraint_R:
    // Accept an address with a short displacement and an index.
    Form = SystemZAddressingMode::FormBDXNormal;
    DispRange = SystemZAddressingMode::Disp12Only;
    break;
  case InlineAsm::Constraint_S:
    // Accept an address with a long displacement, but no index.
    Form = SystemZAddressingMode::FormBD;
    DispRange = SystemZAddressingMode::Disp20Only;
    break;
  case InlineAsm::Constraint_T:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_o:
    // Accept an address with a long displacement and an index.
    // m works the same as T, as this is the most general case.
    // We don't really have any special handling of "offsettable"
    // memory addresses, so just treat o the same as m.
    Form = SystemZAddressingMode::FormBDXNormal;
    DispRange = SystemZAddressingMode::Disp20Only;
    break;
  }

  if (selectBDXAddr(Form, DispRange, Op, Base, Disp, Index)) {
    const TargetRegisterClass *TRC =
        Subtarget->getRegisterInfo()->getPointerRegClass(*MF);
    SDLoc DL(Base);
    SDValue RC = CurDAG->getTargetConstant(TRC->getID(), DL, MVT::i32);

    // Make sure that the base address doesn't go into %r0.
    // If it's a TargetFrameIndex or a fixed register, we shouldn't do anything.
    if (Base.getOpcode() != ISD::TargetFrameIndex &&
        Base.getOpcode() != ISD::Register) {
      Base =
          SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                         DL, Base.getValueType(),
                                         Base, RC), 0);
    }

    // Make sure that the index register isn't assigned to %r0 either.
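    // (%r0 in a base or index position means "no register", so inline asm
    // memory operands must be kept out of it.)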
    if (Index.getOpcode() != ISD::Register) {
      Index =
          SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                         DL, Index.getValueType(),
                                         Index, RC), 0);
    }

    OutOps.push_back(Base);
    OutOps.push_back(Disp);
    OutOps.push_back(Index);
    return false;
  }

  return true;
}

namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
//   (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
    : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  int64_t XORValue;
  int64_t AddValue;
  unsigned Bit;
};
} // end anonymous namespace

// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value. Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);

  // Next try inverting the value and testing a bit. 0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All these can be
  // done by inverting the low CC bit and applying one of the
  // sign-based extractions above.
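  // For example, for CCMask == CCMASK_2, XOR-ing with (1 << IPM_CC) turns
  // CC 2 into CC 3 (and CC 3 into CC 2), after which the CCMASK_3 sequence
  // above gives the required 0/1 value.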
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}

SDValue SystemZDAGToDAGISel::expandSelectBoolean(SDNode *Node) {
  auto *TrueOp = dyn_cast<ConstantSDNode>(Node->getOperand(0));
  auto *FalseOp = dyn_cast<ConstantSDNode>(Node->getOperand(1));
  if (!TrueOp || !FalseOp)
    return SDValue();
  if (FalseOp->getZExtValue() != 0)
    return SDValue();
  if (TrueOp->getSExtValue() != 1 && TrueOp->getSExtValue() != -1)
    return SDValue();

  auto *CCValidOp = dyn_cast<ConstantSDNode>(Node->getOperand(2));
  auto *CCMaskOp = dyn_cast<ConstantSDNode>(Node->getOperand(3));
  if (!CCValidOp || !CCMaskOp)
    return SDValue();
  int CCValid = CCValidOp->getZExtValue();
  int CCMask = CCMaskOp->getZExtValue();

  SDLoc DL(Node);
  SDValue Glue = Node->getOperand(4);
  IPMConversion IPM = getIPMConversion(CCValid, CCMask);
  SDValue Result = CurDAG->getNode(SystemZISD::IPM, DL, MVT::i32, Glue);

  if (IPM.XORValue)
    Result = CurDAG->getNode(ISD::XOR, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.XORValue, DL, MVT::i32));

  if (IPM.AddValue)
    Result = CurDAG->getNode(ISD::ADD, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.AddValue, DL, MVT::i32));

  EVT VT = Node->getValueType(0);
  if (VT == MVT::i32 && IPM.Bit == 31) {
    unsigned ShiftOp = TrueOp->getSExtValue() == 1 ? ISD::SRL : ISD::SRA;
    Result = CurDAG->getNode(ShiftOp, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
  } else {
    if (VT != MVT::i32)
      Result = CurDAG->getNode(ISD::ANY_EXTEND, DL, VT, Result);

    if (TrueOp->getSExtValue() == 1) {
      // The SHR/AND sequence should get optimized to an RISBG.
      Result = CurDAG->getNode(ISD::SRL, DL, VT, Result,
                               CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
      Result = CurDAG->getNode(ISD::AND, DL, VT, Result,
                               CurDAG->getConstant(1, DL, VT));
    } else {
      // Sign-extend from IPM.Bit using a pair of shifts.
      int ShlAmt = VT.getSizeInBits() - 1 - IPM.Bit;
      int SraAmt = VT.getSizeInBits() - 1;
      Result = CurDAG->getNode(ISD::SHL, DL, VT, Result,
                               CurDAG->getConstant(ShlAmt, DL, MVT::i32));
      Result = CurDAG->getNode(ISD::SRA, DL, VT, Result,
                               CurDAG->getConstant(SraAmt, DL, MVT::i32));
    }
  }

  return Result;
}

void SystemZDAGToDAGISel::PreprocessISelDAG() {
  // If we have conditional immediate loads, we always prefer
  // using those over an IPM sequence.
  if (Subtarget->hasLoadStoreOnCond2())
    return;

  bool MadeChange = false;

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
                                       E = CurDAG->allnodes_end();
       I != E;) {
    SDNode *N = &*I++;
    if (N->use_empty())
      continue;

    SDValue Res;
    switch (N->getOpcode()) {
    default: break;
    case SystemZISD::SELECT_CCMASK:
      Res = expandSelectBoolean(N);
      break;
    }

    if (Res) {
      DEBUG(dbgs() << "SystemZ DAG preprocessing replacing:\nOld: ");
      DEBUG(N->dump(CurDAG));
      DEBUG(dbgs() << "\nNew: ");
      DEBUG(Res.getNode()->dump(CurDAG));
      DEBUG(dbgs() << "\n");

      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      MadeChange = true;
    }
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}