//===-- SystemZISelDAGToDAG.cpp - A dag to dag inst selector for SystemZ --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines an instruction selector for the SystemZ target.
//
//===----------------------------------------------------------------------===//

#include "SystemZTargetMachine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "systemz-isel"

namespace {
// Used to build addressing modes.
struct SystemZAddressingMode {
  // The shape of the address.
  enum AddrForm {
    // base+displacement
    FormBD,

    // base+displacement+index for load and store operands
    FormBDXNormal,

    // base+displacement+index for load address operands
    FormBDXLA,

    // base+displacement+index+ADJDYNALLOC
    FormBDXDynAlloc
  };
  AddrForm Form;

  // The type of displacement. The enum names here correspond directly
  // to the definitions in SystemZOperand.td. We could split them into
  // flags -- single/pair, 128-bit, etc. -- but it hardly seems worth it.
  enum DispRange {
    Disp12Only,
    Disp12Pair,
    Disp20Only,
    Disp20Only128,
    Disp20Pair
  };
  DispRange DR;

  // The parts of the address. The address is equivalent to:
  //
  //   Base + Disp + Index + (IncludesDynAlloc ? ADJDYNALLOC : 0)
  SDValue Base;
  int64_t Disp;
  SDValue Index;
  bool IncludesDynAlloc;

  SystemZAddressingMode(AddrForm form, DispRange dr)
    : Form(form), DR(dr), Base(), Disp(0), Index(),
      IncludesDynAlloc(false) {}

  // True if the address can have an index register.
  bool hasIndexField() { return Form != FormBD; }

  // True if the address can (and must) include ADJDYNALLOC.
  bool isDynAlloc() { return Form == FormBDXDynAlloc; }

  void dump(const llvm::SelectionDAG *DAG) {
    errs() << "SystemZAddressingMode " << this << '\n';

    errs() << " Base ";
    if (Base.getNode())
      Base.getNode()->dump(DAG);
    else
      errs() << "null\n";

    if (hasIndexField()) {
      errs() << " Index ";
      if (Index.getNode())
        Index.getNode()->dump(DAG);
      else
        errs() << "null\n";
    }

    errs() << " Disp " << Disp;
    if (IncludesDynAlloc)
      errs() << " + ADJDYNALLOC";
    errs() << '\n';
  }
};

// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
  assert(Count <= 64);
  if (Count > 63)
    return UINT64_MAX;
  return (uint64_t(1) << Count) - 1;
}
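
// For example:
//
//   allOnes(0)  == 0
//   allOnes(12) == 0xfff
//   allOnes(64) == UINT64_MAX
//
// The explicit Count > 63 check avoids shifting a uint64_t by 64, which
// would be undefined behaviour.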

// Represents operands 2 to 5 of the ROTATE AND ... SELECTED BITS operation
// given by Opcode. The operands are: Input (R2), Start (I3), End (I4) and
// Rotate (I5). The combined operand value is effectively:
//
//   (or (rotl Input, Rotate), ~Mask)
//
// for RNSBG and:
//
//   (and (rotl Input, Rotate), Mask)
//
// otherwise. The output value has BitSize bits, although Input may be
// narrower (in which case the upper bits are don't care), or wider (in which
// case the result will be truncated as part of the operation).
struct RxSBGOperands {
  RxSBGOperands(unsigned Op, SDValue N)
    : Opcode(Op), BitSize(N.getValueSizeInBits()),
      Mask(allOnes(BitSize)), Input(N), Start(64 - BitSize), End(63),
      Rotate(0) {}

  unsigned Opcode;
  unsigned BitSize;
  uint64_t Mask;
  SDValue Input;
  unsigned Start;
  unsigned End;
  unsigned Rotate;
};

class SystemZDAGToDAGISel : public SelectionDAGISel {
  const SystemZSubtarget *Subtarget;

  // Used by SystemZOperands.td to create integer constants.
  inline SDValue getImm(const SDNode *Node, uint64_t Imm) const {
    return CurDAG->getTargetConstant(Imm, SDLoc(Node), Node->getValueType(0));
  }

  const SystemZTargetMachine &getTargetMachine() const {
    return static_cast<const SystemZTargetMachine &>(TM);
  }

  const SystemZInstrInfo *getInstrInfo() const {
    return Subtarget->getInstrInfo();
  }

  // Try to fold more of the base or index of AM into AM, where IsBase
  // selects between the base and index.
  bool expandAddress(SystemZAddressingMode &AM, bool IsBase) const;

  // Try to describe N in AM, returning true on success.
  bool selectAddress(SDValue N, SystemZAddressingMode &AM) const;

  // Extract individual target operands from matched address AM.
  void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
                          SDValue &Base, SDValue &Disp) const;
  void getAddressOperands(const SystemZAddressingMode &AM, EVT VT,
                          SDValue &Base, SDValue &Disp, SDValue &Index) const;

  // Try to match Addr as a FormBD address with displacement type DR.
  // Return true on success, storing the base and displacement in
  // Base and Disp respectively.
  bool selectBDAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
                    SDValue &Base, SDValue &Disp) const;

  // Try to match Addr as a FormBDX address with displacement type DR,
  // but only if the matched address has no index. Return true on
  // success, storing the base and displacement in Base and Disp
  // respectively.
  bool selectMVIAddr(SystemZAddressingMode::DispRange DR, SDValue Addr,
                     SDValue &Base, SDValue &Disp) const;

  // Try to match Addr as a FormBDX* address of form Form with
  // displacement type DR. Return true on success, storing the base,
  // displacement and index in Base, Disp and Index respectively.
  bool selectBDXAddr(SystemZAddressingMode::AddrForm Form,
                     SystemZAddressingMode::DispRange DR, SDValue Addr,
                     SDValue &Base, SDValue &Disp, SDValue &Index) const;

  // PC-relative address matching routines used by SystemZOperands.td.
  bool selectPCRelAddress(SDValue Addr, SDValue &Target) const {
    if (SystemZISD::isPCREL(Addr.getOpcode())) {
      Target = Addr.getOperand(0);
      return true;
    }
    return false;
  }
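
  // The select* routines above and below are referenced by name from
  // SystemZOperands.td, which wraps them in ComplexPatterns; conceptually
  // (simplified, not the literal .td source) something like:
  //
  //   def bdxaddr20only : ComplexPattern<i64, 3, "selectBDXAddr20Only", ...>;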

  // BD matching routines used by SystemZOperands.td.
  bool selectBDAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp12Only, Addr, Base, Disp);
  }
  bool selectBDAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
  }
  bool selectBDAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp20Only, Addr, Base, Disp);
  }
  bool selectBDAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectBDAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
  }

  // MVI matching routines used by SystemZOperands.td.
  bool selectMVIAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectMVIAddr(SystemZAddressingMode::Disp12Pair, Addr, Base, Disp);
  }
  bool selectMVIAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp) const {
    return selectMVIAddr(SystemZAddressingMode::Disp20Pair, Addr, Base, Disp);
  }

  // BDX matching routines used by SystemZOperands.td.
  bool selectBDXAddr12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp12Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp12Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectDynAlloc12Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                            SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXDynAlloc,
                         SystemZAddressingMode::Disp12Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Only(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Only,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Only128(SDValue Addr, SDValue &Base, SDValue &Disp,
                              SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Only128,
                         Addr, Base, Disp, Index);
  }
  bool selectBDXAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                           SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXNormal,
                         SystemZAddressingMode::Disp20Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectLAAddr12Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                          SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
                         SystemZAddressingMode::Disp12Pair,
                         Addr, Base, Disp, Index);
  }
  bool selectLAAddr20Pair(SDValue Addr, SDValue &Base, SDValue &Disp,
                          SDValue &Index) const {
    return selectBDXAddr(SystemZAddressingMode::FormBDXLA,
                         SystemZAddressingMode::Disp20Pair,
                         Addr, Base, Disp, Index);
  }

  // Try to match Addr as an address with a base, 12-bit displacement
  // and index, where the index is element Elem of a vector.
  // Return true on success, storing the base, displacement and vector
  // in Base, Disp and Index respectively.
  bool selectBDVAddr12Only(SDValue Addr, SDValue Elem, SDValue &Base,
                           SDValue &Disp, SDValue &Index) const;

  // Check whether (or Op (and X InsertMask)) is effectively an insertion
  // of X into bits InsertMask of some Y != Op.
  // Return true if so and set Op to that Y.
  bool detectOrAndInsertion(SDValue &Op, uint64_t InsertMask) const;

  // Try to update RxSBG so that only the bits of RxSBG.Input in Mask are used.
  // Return true on success.
  bool refineRxSBGMask(RxSBGOperands &RxSBG, uint64_t Mask) const;

  // Try to fold some of RxSBG.Input into other fields of RxSBG.
  // Return true on success.
  bool expandRxSBG(RxSBGOperands &RxSBG) const;

  // Return an undefined value of type VT.
  SDValue getUNDEF(const SDLoc &DL, EVT VT) const;

  // Convert N to VT, if it isn't already.
  SDValue convertTo(const SDLoc &DL, EVT VT, SDValue N) const;

  // Try to implement AND or shift node N using RISBG with the zero flag set.
  // Return true on success.
  bool tryRISBGZero(SDNode *N);

  // Try to use RISBG or Opcode to implement OR or XOR node N.
  // Return true on success.
  bool tryRxSBG(SDNode *N, unsigned Opcode);

  // If Op0 is null, then Node is a constant that can be loaded using:
  //
  //   (Opcode UpperVal LowerVal)
  //
  // If Op0 is nonnull, then Node can be implemented using:
  //
  //   (Opcode (Opcode Op0 UpperVal) LowerVal)
  void splitLargeImmediate(unsigned Opcode, SDNode *Node, SDValue Op0,
                           uint64_t UpperVal, uint64_t LowerVal);

  // Try to use gather instruction Opcode to implement vector insertion N.
  bool tryGather(SDNode *N, unsigned Opcode);

  // Try to use scatter instruction Opcode to implement store Store.
  bool tryScatter(StoreSDNode *Store, unsigned Opcode);

  // Change a chain of {load; op; store} of the same value into a simple op
  // through memory of that value, if the uses of the modified value and its
  // address are suitable.
  bool tryFoldLoadStoreIntoMemOperand(SDNode *Node);

  // Return true if Load and Store are loads and stores of the same size
  // and are guaranteed not to overlap. Such operations can be implemented
  // using block (SS-format) instructions.
  //
  // Partial overlap would lead to incorrect code, since the block operations
  // are logically bytewise, even though they have a fast path for the
  // non-overlapping case. We also need to avoid full overlap (i.e. two
  // addresses that might be equal at run time) because although that case
  // would be handled correctly, it might be implemented by millicode.
  bool canUseBlockOperation(StoreSDNode *Store, LoadSDNode *Load) const;

  // N is a (store (load Y), X) pattern. Return true if it can use an MVC
  // from Y to X.
  bool storeLoadCanUseMVC(SDNode *N) const;

  // N is a (store (op (load A[0]), (load A[1])), X) pattern. Return true
  // if A[1 - I] == X and if N can use a block operation like NC from A[I]
  // to X.
  bool storeLoadCanUseBlockBinary(SDNode *N, unsigned I) const;

  // Try to expand a boolean SELECT_CCMASK using an IPM sequence.
  SDValue expandSelectBoolean(SDNode *Node);

public:
  SystemZDAGToDAGISel(SystemZTargetMachine &TM, CodeGenOpt::Level OptLevel)
    : SelectionDAGISel(TM, OptLevel) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    Subtarget = &MF.getSubtarget<SystemZSubtarget>();
    return SelectionDAGISel::runOnMachineFunction(MF);
  }

  // Override MachineFunctionPass.
  StringRef getPassName() const override {
    return "SystemZ DAG->DAG Pattern Instruction Selection";
  }

  // Override SelectionDAGISel.
  void Select(SDNode *Node) override;
  bool SelectInlineAsmMemoryOperand(const SDValue &Op, unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override;
  bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;
  void PreprocessISelDAG() override;

  // Include the pieces autogenerated from the target description.
#include "SystemZGenDAGISel.inc"
};
} // end anonymous namespace

FunctionPass *llvm::createSystemZISelDag(SystemZTargetMachine &TM,
                                         CodeGenOpt::Level OptLevel) {
  return new SystemZDAGToDAGISel(TM, OptLevel);
}

// Return true if Val should be selected as a displacement for an address
// with range DR. Here we're interested in the range of both the instruction
// described by DR and of any pairing instruction.
static bool selectDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
  switch (DR) {
  case SystemZAddressingMode::Disp12Only:
    return isUInt<12>(Val);

  case SystemZAddressingMode::Disp12Pair:
  case SystemZAddressingMode::Disp20Only:
  case SystemZAddressingMode::Disp20Pair:
    return isInt<20>(Val);

  case SystemZAddressingMode::Disp20Only128:
    return isInt<20>(Val) && isInt<20>(Val + 8);
  }
  llvm_unreachable("Unhandled displacement range");
}

// Change the base or index in AM to Value, where IsBase selects
// between the base and index.
static void changeComponent(SystemZAddressingMode &AM, bool IsBase,
                            SDValue Value) {
  if (IsBase)
    AM.Base = Value;
  else
    AM.Index = Value;
}

// The base or index of AM is equivalent to Value + ADJDYNALLOC,
// where IsBase selects between the base and index. Try to fold the
// ADJDYNALLOC into AM.
static bool expandAdjDynAlloc(SystemZAddressingMode &AM, bool IsBase,
                              SDValue Value) {
  if (AM.isDynAlloc() && !AM.IncludesDynAlloc) {
    changeComponent(AM, IsBase, Value);
    AM.IncludesDynAlloc = true;
    return true;
  }
  return false;
}

// The base of AM is equivalent to Base + Index. Try to use Index as
// the index register.
static bool expandIndex(SystemZAddressingMode &AM, SDValue Base,
                        SDValue Index) {
  if (AM.hasIndexField() && !AM.Index.getNode()) {
    AM.Base = Base;
    AM.Index = Index;
    return true;
  }
  return false;
}

// The base or index of AM is equivalent to Op0 + Op1, where IsBase selects
// between the base and index. Try to fold Op1 into AM's displacement.
static bool expandDisp(SystemZAddressingMode &AM, bool IsBase,
                       SDValue Op0, uint64_t Op1) {
  // First try adjusting the displacement.
  int64_t TestDisp = AM.Disp + Op1;
  if (selectDisp(AM.DR, TestDisp)) {
    changeComponent(AM, IsBase, Op0);
    AM.Disp = TestDisp;
    return true;
  }

  // We could consider forcing the displacement into a register and
  // using it as an index, but it would need to be carefully tuned.
  return false;
}
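
// For example, with an empty addressing mode (Disp == 0) and range Disp12Only,
// expandDisp folds (add %base, 100) into base %base + displacement 100, but
// leaves (add %base, 4096) alone because 4096 is not a valid unsigned 12-bit
// displacement. With Disp20Only the same offset would be folded, since it
// fits in a signed 20-bit field.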

bool SystemZDAGToDAGISel::expandAddress(SystemZAddressingMode &AM,
                                        bool IsBase) const {
  SDValue N = IsBase ? AM.Base : AM.Index;
  unsigned Opcode = N.getOpcode();
  if (Opcode == ISD::TRUNCATE) {
    N = N.getOperand(0);
    Opcode = N.getOpcode();
  }
  if (Opcode == ISD::ADD || CurDAG->isBaseWithConstantOffset(N)) {
    SDValue Op0 = N.getOperand(0);
    SDValue Op1 = N.getOperand(1);

    unsigned Op0Code = Op0->getOpcode();
    unsigned Op1Code = Op1->getOpcode();

    if (Op0Code == SystemZISD::ADJDYNALLOC)
      return expandAdjDynAlloc(AM, IsBase, Op1);
    if (Op1Code == SystemZISD::ADJDYNALLOC)
      return expandAdjDynAlloc(AM, IsBase, Op0);

    if (Op0Code == ISD::Constant)
      return expandDisp(AM, IsBase, Op1,
                        cast<ConstantSDNode>(Op0)->getSExtValue());
    if (Op1Code == ISD::Constant)
      return expandDisp(AM, IsBase, Op0,
                        cast<ConstantSDNode>(Op1)->getSExtValue());

    if (IsBase && expandIndex(AM, Op0, Op1))
      return true;
  }
  if (Opcode == SystemZISD::PCREL_OFFSET) {
    SDValue Full = N.getOperand(0);
    SDValue Base = N.getOperand(1);
    SDValue Anchor = Base.getOperand(0);
    uint64_t Offset = (cast<GlobalAddressSDNode>(Full)->getOffset() -
                       cast<GlobalAddressSDNode>(Anchor)->getOffset());
    return expandDisp(AM, IsBase, Base, Offset);
  }
  return false;
}

// Return true if an instruction with displacement range DR should be
// used for displacement value Val. selectDisp(DR, Val) must already hold.
static bool isValidDisp(SystemZAddressingMode::DispRange DR, int64_t Val) {
  assert(selectDisp(DR, Val) && "Invalid displacement");
  switch (DR) {
  case SystemZAddressingMode::Disp12Only:
  case SystemZAddressingMode::Disp20Only:
  case SystemZAddressingMode::Disp20Only128:
    return true;

  case SystemZAddressingMode::Disp12Pair:
    // Use the other instruction if the displacement is too large.
    return isUInt<12>(Val);

  case SystemZAddressingMode::Disp20Pair:
    // Use the other instruction if the displacement is small enough.
    return !isUInt<12>(Val);
  }
  llvm_unreachable("Unhandled displacement range");
}
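
// The Disp12Pair/Disp20Pair ranges model instruction pairs such as L (12-bit
// unsigned displacement) and LY (20-bit signed displacement): a displacement
// of 100 picks the short form, while 4096 or -4 requires the long form. The
// actual pairings are described by the operand definitions in
// SystemZOperands.td; L/LY here is just an illustration.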

// Return true if Base + Disp + Index should be performed by LA(Y).
static bool shouldUseLA(SDNode *Base, int64_t Disp, SDNode *Index) {
  // Don't use LA(Y) for constants.
  if (!Base)
    return false;

  // Always use LA(Y) for frame addresses, since we know that the destination
  // register is almost always (perhaps always) going to be different from
  // the frame register.
  if (Base->getOpcode() == ISD::FrameIndex)
    return true;

  if (Disp) {
    // Always use LA(Y) if there is a base, displacement and index.
    if (Index)
      return true;

    // Always use LA if the displacement is small enough. It should always
    // be no worse than AGHI (and better if it avoids a move).
    if (isUInt<12>(Disp))
      return true;

    // For similar reasons, always use LAY if the constant is too big for AGHI.
    // LAY should be no worse than AGFI.
    if (!isInt<16>(Disp))
      return true;
  } else {
    // Don't use LA for plain registers.
    if (!Index)
      return false;

    // Don't use LA for plain addition if the index operand is only used
    // once. It should be a natural two-operand addition in that case.
    if (Index->hasOneUse())
      return false;

    // Prefer addition if the second operation is sign-extended, in the
    // hope of using AGF.
    unsigned IndexOpcode = Index->getOpcode();
    if (IndexOpcode == ISD::SIGN_EXTEND ||
        IndexOpcode == ISD::SIGN_EXTEND_INREG)
      return false;
  }

  // Don't use LA for two-operand addition if either operand is only
  // used once. The addition instructions are better in that case.
  if (Base->hasOneUse())
    return false;

  return true;
}

// Return true if Addr is suitable for AM, updating AM if so.
bool SystemZDAGToDAGISel::selectAddress(SDValue Addr,
                                        SystemZAddressingMode &AM) const {
  // Start out assuming that the address will need to be loaded separately,
  // then try to extend it as much as we can.
  AM.Base = Addr;

  // First try treating the address as a constant.
  if (Addr.getOpcode() == ISD::Constant &&
      expandDisp(AM, true, SDValue(),
                 cast<ConstantSDNode>(Addr)->getSExtValue()))
    ;
  // Also see if it's a bare ADJDYNALLOC.
  else if (Addr.getOpcode() == SystemZISD::ADJDYNALLOC &&
           expandAdjDynAlloc(AM, true, SDValue()))
    ;
  else
    // Otherwise try expanding each component.
    while (expandAddress(AM, true) ||
           (AM.Index.getNode() && expandAddress(AM, false)))
      continue;

  // Reject cases where it isn't profitable to use LA(Y).
  if (AM.Form == SystemZAddressingMode::FormBDXLA &&
      !shouldUseLA(AM.Base.getNode(), AM.Disp, AM.Index.getNode()))
    return false;

  // Reject cases where the other instruction in a pair should be used.
  if (!isValidDisp(AM.DR, AM.Disp))
    return false;

  // Make sure that ADJDYNALLOC is included where necessary.
  if (AM.isDynAlloc() && !AM.IncludesDynAlloc)
    return false;

  LLVM_DEBUG(AM.dump(CurDAG));
  return true;
}

// Insert a node into the DAG at least before Pos. This will reposition
// the node as needed, and will assign it a node ID that is <= Pos's ID.
// Note that this does *not* preserve the uniqueness of node IDs!
// The selection DAG must no longer depend on their uniqueness when this
// function is used.
static void insertDAGNode(SelectionDAG *DAG, SDNode *Pos, SDValue N) {
  if (N->getNodeId() == -1 ||
      (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
       SelectionDAGISel::getUninvalidatedNodeId(Pos))) {
    DAG->RepositionNode(Pos->getIterator(), N.getNode());
    // Mark N as invalid for pruning, since after this it may be a successor
    // to a selected node but otherwise be in the same position as Pos.
    // Conservatively mark it with the same -abs(Id) to ensure the node id
    // invariant is preserved.
    N->setNodeId(Pos->getNodeId());
    SelectionDAGISel::InvalidateNodeId(N.getNode());
  }
}

void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
                                             EVT VT, SDValue &Base,
                                             SDValue &Disp) const {
  Base = AM.Base;
  if (!Base.getNode())
    // Register 0 means "no base". This is mostly useful for shifts.
    Base = CurDAG->getRegister(0, VT);
  else if (Base.getOpcode() == ISD::FrameIndex) {
    // Lower a FrameIndex to a TargetFrameIndex.
    int64_t FrameIndex = cast<FrameIndexSDNode>(Base)->getIndex();
    Base = CurDAG->getTargetFrameIndex(FrameIndex, VT);
  } else if (Base.getValueType() != VT) {
    // Truncate values from i64 to i32, for shifts.
    assert(VT == MVT::i32 && Base.getValueType() == MVT::i64 &&
           "Unexpected truncation");
    SDLoc DL(Base);
    SDValue Trunc = CurDAG->getNode(ISD::TRUNCATE, DL, VT, Base);
    insertDAGNode(CurDAG, Base.getNode(), Trunc);
    Base = Trunc;
  }

  // Lower the displacement to a TargetConstant.
  Disp = CurDAG->getTargetConstant(AM.Disp, SDLoc(Base), VT);
}

void SystemZDAGToDAGISel::getAddressOperands(const SystemZAddressingMode &AM,
                                             EVT VT, SDValue &Base,
                                             SDValue &Disp,
                                             SDValue &Index) const {
  getAddressOperands(AM, VT, Base, Disp);

  Index = AM.Index;
  if (!Index.getNode())
    // Register 0 means "no index".
    Index = CurDAG->getRegister(0, VT);
}

bool SystemZDAGToDAGISel::selectBDAddr(SystemZAddressingMode::DispRange DR,
                                       SDValue Addr, SDValue &Base,
                                       SDValue &Disp) const {
  SystemZAddressingMode AM(SystemZAddressingMode::FormBD, DR);
  if (!selectAddress(Addr, AM))
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp);
  return true;
}

bool SystemZDAGToDAGISel::selectMVIAddr(SystemZAddressingMode::DispRange DR,
                                        SDValue Addr, SDValue &Base,
                                        SDValue &Disp) const {
  SystemZAddressingMode AM(SystemZAddressingMode::FormBDXNormal, DR);
  if (!selectAddress(Addr, AM) || AM.Index.getNode())
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp);
  return true;
}

bool SystemZDAGToDAGISel::selectBDXAddr(SystemZAddressingMode::AddrForm Form,
                                        SystemZAddressingMode::DispRange DR,
                                        SDValue Addr, SDValue &Base,
                                        SDValue &Disp, SDValue &Index) const {
  SystemZAddressingMode AM(Form, DR);
  if (!selectAddress(Addr, AM))
    return false;

  getAddressOperands(AM, Addr.getValueType(), Base, Disp, Index);
  return true;
}

bool SystemZDAGToDAGISel::selectBDVAddr12Only(SDValue Addr, SDValue Elem,
                                              SDValue &Base,
                                              SDValue &Disp,
                                              SDValue &Index) const {
  SDValue Regs[2];
  if (selectBDXAddr12Only(Addr, Regs[0], Disp, Regs[1]) &&
      Regs[0].getNode() && Regs[1].getNode()) {
    for (unsigned int I = 0; I < 2; ++I) {
      Base = Regs[I];
      Index = Regs[1 - I];
      // We can't tell here whether the index vector has the right type
      // for the access; the caller needs to do that instead.
      if (Index.getOpcode() == ISD::ZERO_EXTEND)
        Index = Index.getOperand(0);
      if (Index.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          Index.getOperand(1) == Elem) {
        Index = Index.getOperand(0);
        return true;
      }
    }
  }
  return false;
}
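
// For example, selectBDVAddr12Only accepts an address of the form
//
//   (add %base, (zero_extend (extract_vector_elt %vecindex, %elem)))
//
// returning %base, a displacement in the unsigned 12-bit range and the
// %vecindex vector. This is the shape required by the vector gather/scatter
// element instructions selected in tryGather and tryScatter below.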

bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
                                               uint64_t InsertMask) const {
  // We're only interested in cases where the insertion is into some operand
  // of Op, rather than into Op itself. The only useful case is an AND.
  if (Op.getOpcode() != ISD::AND)
    return false;

  // We need a constant mask.
  auto *MaskNode = dyn_cast<ConstantSDNode>(Op.getOperand(1).getNode());
  if (!MaskNode)
    return false;

  // It's not an insertion of Op.getOperand(0) if the two masks overlap.
  uint64_t AndMask = MaskNode->getZExtValue();
  if (InsertMask & AndMask)
    return false;

  // It's only an insertion if all bits are covered or are known to be zero.
  // The inner check covers all cases but is more expensive.
  uint64_t Used = allOnes(Op.getValueSizeInBits());
  if (Used != (AndMask | InsertMask)) {
    KnownBits Known;
    CurDAG->computeKnownBits(Op.getOperand(0), Known);
    if (Used != (AndMask | InsertMask | Known.Zero.getZExtValue()))
      return false;
  }

  Op = Op.getOperand(0);
  return true;
}

bool SystemZDAGToDAGISel::refineRxSBGMask(RxSBGOperands &RxSBG,
                                          uint64_t Mask) const {
  const SystemZInstrInfo *TII = getInstrInfo();
  if (RxSBG.Rotate != 0)
    Mask = (Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate));
  Mask &= RxSBG.Mask;
  if (TII->isRxSBGMask(Mask, RxSBG.BitSize, RxSBG.Start, RxSBG.End)) {
    RxSBG.Mask = Mask;
    return true;
  }
  return false;
}

// Return true if any bits of (RxSBG.Input & Mask) are significant.
static bool maskMatters(RxSBGOperands &RxSBG, uint64_t Mask) {
  // Rotate the mask in the same way as RxSBG.Input is rotated.
  if (RxSBG.Rotate != 0)
    Mask = ((Mask << RxSBG.Rotate) | (Mask >> (64 - RxSBG.Rotate)));
  return (Mask & RxSBG.Mask) != 0;
}

bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
  SDValue N = RxSBG.Input;
  unsigned Opcode = N.getOpcode();
  switch (Opcode) {
  case ISD::TRUNCATE: {
    if (RxSBG.Opcode == SystemZ::RNSBG)
      return false;
    uint64_t BitSize = N.getValueSizeInBits();
    uint64_t Mask = allOnes(BitSize);
    if (!refineRxSBGMask(RxSBG, Mask))
      return false;
    RxSBG.Input = N.getOperand(0);
    return true;
  }
  case ISD::AND: {
    if (RxSBG.Opcode == SystemZ::RNSBG)
      return false;

    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!MaskNode)
      return false;

    SDValue Input = N.getOperand(0);
    uint64_t Mask = MaskNode->getZExtValue();
    if (!refineRxSBGMask(RxSBG, Mask)) {
      // If some bits of Input are already known zeros, those bits will have
      // been removed from the mask. See if adding them back in makes the
      // mask suitable.
      KnownBits Known;
      CurDAG->computeKnownBits(Input, Known);
      Mask |= Known.Zero.getZExtValue();
      if (!refineRxSBGMask(RxSBG, Mask))
        return false;
    }
    RxSBG.Input = Input;
    return true;
  }

  case ISD::OR: {
    if (RxSBG.Opcode != SystemZ::RNSBG)
      return false;

    auto *MaskNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!MaskNode)
      return false;

    SDValue Input = N.getOperand(0);
    uint64_t Mask = ~MaskNode->getZExtValue();
    if (!refineRxSBGMask(RxSBG, Mask)) {
      // If some bits of Input are already known ones, those bits will have
      // been removed from the mask. See if adding them back in makes the
      // mask suitable.
      KnownBits Known;
      CurDAG->computeKnownBits(Input, Known);
      Mask &= ~Known.One.getZExtValue();
      if (!refineRxSBGMask(RxSBG, Mask))
        return false;
    }
    RxSBG.Input = Input;
    return true;
  }

  case ISD::ROTL: {
    // Any 64-bit rotate left can be merged into the RxSBG.
    if (RxSBG.BitSize != 64 || N.getValueType() != MVT::i64)
      return false;
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    RxSBG.Rotate = (RxSBG.Rotate + CountNode->getZExtValue()) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::ANY_EXTEND:
    // Bits above the extended operand are don't-care.
    RxSBG.Input = N.getOperand(0);
    return true;

  case ISD::ZERO_EXTEND:
    if (RxSBG.Opcode != SystemZ::RNSBG) {
      // Restrict the mask to the extended operand.
      unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
      if (!refineRxSBGMask(RxSBG, allOnes(InnerBitSize)))
        return false;

      RxSBG.Input = N.getOperand(0);
      return true;
    }
    LLVM_FALLTHROUGH;

  case ISD::SIGN_EXTEND: {
    // Check that the extension bits are don't-care (i.e. are masked out
    // by the final mask).
    unsigned BitSize = N.getValueSizeInBits();
    unsigned InnerBitSize = N.getOperand(0).getValueSizeInBits();
    if (maskMatters(RxSBG, allOnes(BitSize) - allOnes(InnerBitSize))) {
      // In the case where only the sign bit is active, increase Rotate with
      // the extension width.
      if (RxSBG.Mask == 1 && RxSBG.Rotate == 1)
        RxSBG.Rotate += (BitSize - InnerBitSize);
      else
        return false;
    }

    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::SHL: {
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    uint64_t Count = CountNode->getZExtValue();
    unsigned BitSize = N.getValueSizeInBits();
    if (Count < 1 || Count >= BitSize)
      return false;

    if (RxSBG.Opcode == SystemZ::RNSBG) {
      // Treat (shl X, count) as (rotl X, count) as long as the bottom
      // count bits from RxSBG.Input are ignored.
      if (maskMatters(RxSBG, allOnes(Count)))
        return false;
    } else {
      // Treat (shl X, count) as (and (rotl X, count), ~0<<count).
      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count) << Count))
        return false;
    }

    RxSBG.Rotate = (RxSBG.Rotate + Count) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }

  case ISD::SRL:
  case ISD::SRA: {
    auto *CountNode = dyn_cast<ConstantSDNode>(N.getOperand(1).getNode());
    if (!CountNode)
      return false;

    uint64_t Count = CountNode->getZExtValue();
    unsigned BitSize = N.getValueSizeInBits();
    if (Count < 1 || Count >= BitSize)
      return false;

    if (RxSBG.Opcode == SystemZ::RNSBG || Opcode == ISD::SRA) {
      // Treat (srl|sra X, count) as (rotl X, size-count) as long as the top
      // count bits from RxSBG.Input are ignored.
      if (maskMatters(RxSBG, allOnes(Count) << (BitSize - Count)))
        return false;
    } else {
      // Treat (srl X, count) as (and (rotl X, size-count), ~0>>count),
      // which is similar to SLL above.
      if (!refineRxSBGMask(RxSBG, allOnes(BitSize - Count)))
        return false;
    }

    RxSBG.Rotate = (RxSBG.Rotate - Count) & 63;
    RxSBG.Input = N.getOperand(0);
    return true;
  }
  default:
    return false;
  }
}

SDValue SystemZDAGToDAGISel::getUNDEF(const SDLoc &DL, EVT VT) const {
  SDNode *N = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, VT);
  return SDValue(N, 0);
}

SDValue SystemZDAGToDAGISel::convertTo(const SDLoc &DL, EVT VT,
                                       SDValue N) const {
  if (N.getValueType() == MVT::i32 && VT == MVT::i64)
    return CurDAG->getTargetInsertSubreg(SystemZ::subreg_l32,
                                         DL, VT, getUNDEF(DL, MVT::i64), N);
  if (N.getValueType() == MVT::i64 && VT == MVT::i32)
    return CurDAG->getTargetExtractSubreg(SystemZ::subreg_l32, DL, VT, N);
  assert(N.getValueType() == VT && "Unexpected value types");
  return N;
}

bool SystemZDAGToDAGISel::tryRISBGZero(SDNode *N) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getSizeInBits() > 64)
    return false;
  RxSBGOperands RISBG(SystemZ::RISBG, SDValue(N, 0));
  unsigned Count = 0;
  while (expandRxSBG(RISBG))
    // The widening or narrowing is expected to be free.
    // Counting widening or narrowing as a saved operation will result in
    // preferring an R*SBG over a simple shift/logical instruction.
    if (RISBG.Input.getOpcode() != ISD::ANY_EXTEND &&
        RISBG.Input.getOpcode() != ISD::TRUNCATE)
      Count += 1;
  if (Count == 0)
    return false;

  // Prefer to use normal shift instructions over RISBG, since they can handle
  // all cases and are sometimes shorter.
  if (Count == 1 && N->getOpcode() != ISD::AND)
    return false;

  // Prefer register extensions like LLC over RISBG. Also prefer to start
  // out with normal ANDs if one instruction would be enough. We can convert
  // these ANDs into an RISBG later if a three-address instruction is useful.
  if (RISBG.Rotate == 0) {
    bool PreferAnd = false;
    // Prefer AND for any 32-bit and-immediate operation.
    if (VT == MVT::i32)
      PreferAnd = true;
    // As well as for any 64-bit operation that can be implemented via LLC(R),
    // LLH(R), LLGT(R), or one of the and-immediate instructions.
    else if (RISBG.Mask == 0xff ||
             RISBG.Mask == 0xffff ||
             RISBG.Mask == 0x7fffffff ||
             SystemZ::isImmLF(~RISBG.Mask) ||
             SystemZ::isImmHF(~RISBG.Mask))
      PreferAnd = true;
    // And likewise for the LLZRGF instruction, which doesn't have a register
    // to register version.
    else if (auto *Load = dyn_cast<LoadSDNode>(RISBG.Input)) {
      if (Load->getMemoryVT() == MVT::i32 &&
          (Load->getExtensionType() == ISD::EXTLOAD ||
           Load->getExtensionType() == ISD::ZEXTLOAD) &&
          RISBG.Mask == 0xffffff00 &&
          Subtarget->hasLoadAndZeroRightmostByte())
        PreferAnd = true;
    }
    if (PreferAnd) {
      // Replace the current node with an AND. Note that the current node
      // might already be that same AND, in which case it is already CSE'd
      // with it, and we must not call ReplaceNode.
      SDValue In = convertTo(DL, VT, RISBG.Input);
      SDValue Mask = CurDAG->getConstant(RISBG.Mask, DL, VT);
      SDValue New = CurDAG->getNode(ISD::AND, DL, VT, In, Mask);
      if (N != New.getNode()) {
        insertDAGNode(CurDAG, N, Mask);
        insertDAGNode(CurDAG, N, New);
        ReplaceNode(N, New.getNode());
        N = New.getNode();
      }
      // Now, select the machine opcode to implement this operation.
      if (!N->isMachineOpcode())
        SelectCode(N);
      return true;
    }
  }

  unsigned Opcode = SystemZ::RISBG;
  // Prefer RISBGN if available, since it does not clobber CC.
  if (Subtarget->hasMiscellaneousExtensions())
    Opcode = SystemZ::RISBGN;
  EVT OpcodeVT = MVT::i64;
  if (VT == MVT::i32 && Subtarget->hasHighWord() &&
      // We can only use the 32-bit instructions if all source bits are
      // in the low 32 bits without wrapping, both after rotation (because
      // of the smaller range for Start and End) and before rotation
      // (because the input value is truncated).
      RISBG.Start >= 32 && RISBG.End >= RISBG.Start &&
      ((RISBG.Start + RISBG.Rotate) & 63) >= 32 &&
      ((RISBG.End + RISBG.Rotate) & 63) >=
      ((RISBG.Start + RISBG.Rotate) & 63)) {
    Opcode = SystemZ::RISBMux;
    OpcodeVT = MVT::i32;
    RISBG.Start &= 31;
    RISBG.End &= 31;
  }
  SDValue Ops[5] = {
    getUNDEF(DL, OpcodeVT),
    convertTo(DL, OpcodeVT, RISBG.Input),
    CurDAG->getTargetConstant(RISBG.Start, DL, MVT::i32),
    CurDAG->getTargetConstant(RISBG.End | 128, DL, MVT::i32),
    CurDAG->getTargetConstant(RISBG.Rotate, DL, MVT::i32)
  };
  SDValue New = convertTo(
      DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, OpcodeVT, Ops), 0));
  ReplaceNode(N, New.getNode());
  return true;
}
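
// As an illustration of the operand encoding above: for (and (srl X, 2), 0xff)
// on i64, expandRxSBG ends up with Start = 56, End = 63 and Rotate = 62, so
// the selected node corresponds to something like
//
//   risbg %r1, %rX, 56, 191, 62
//
// where 191 is End | 128, i.e. End with the "zero remaining bits" flag set.
// (Register numbers are just an example; the allocator assigns the real ones.)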

bool SystemZDAGToDAGISel::tryRxSBG(SDNode *N, unsigned Opcode) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  if (!VT.isInteger() || VT.getSizeInBits() > 64)
    return false;
  // Try treating each operand of N as the second operand of the RxSBG
  // and see which goes deepest.
  RxSBGOperands RxSBG[] = {
    RxSBGOperands(Opcode, N->getOperand(0)),
    RxSBGOperands(Opcode, N->getOperand(1))
  };
  unsigned Count[] = { 0, 0 };
  for (unsigned I = 0; I < 2; ++I)
    while (expandRxSBG(RxSBG[I]))
      // The widening or narrowing is expected to be free.
      // Counting widening or narrowing as a saved operation will result in
      // preferring an R*SBG over a simple shift/logical instruction.
      if (RxSBG[I].Input.getOpcode() != ISD::ANY_EXTEND &&
          RxSBG[I].Input.getOpcode() != ISD::TRUNCATE)
        Count[I] += 1;

  // Do nothing if neither operand is suitable.
  if (Count[0] == 0 && Count[1] == 0)
    return false;

  // Pick the deepest second operand.
  unsigned I = Count[0] > Count[1] ? 0 : 1;
  SDValue Op0 = N->getOperand(I ^ 1);

  // Prefer IC for character insertions from memory.
  if (Opcode == SystemZ::ROSBG && (RxSBG[I].Mask & 0xff) == 0)
    if (auto *Load = dyn_cast<LoadSDNode>(Op0.getNode()))
      if (Load->getMemoryVT() == MVT::i8)
        return false;

  // See whether we can avoid an AND in the first operand by converting
  // ROSBG to RISBG.
  if (Opcode == SystemZ::ROSBG && detectOrAndInsertion(Op0, RxSBG[I].Mask)) {
    Opcode = SystemZ::RISBG;
    // Prefer RISBGN if available, since it does not clobber CC.
    if (Subtarget->hasMiscellaneousExtensions())
      Opcode = SystemZ::RISBGN;
  }

  SDValue Ops[5] = {
    convertTo(DL, MVT::i64, Op0),
    convertTo(DL, MVT::i64, RxSBG[I].Input),
    CurDAG->getTargetConstant(RxSBG[I].Start, DL, MVT::i32),
    CurDAG->getTargetConstant(RxSBG[I].End, DL, MVT::i32),
    CurDAG->getTargetConstant(RxSBG[I].Rotate, DL, MVT::i32)
  };
  SDValue New = convertTo(
      DL, VT, SDValue(CurDAG->getMachineNode(Opcode, DL, MVT::i64, Ops), 0));
  ReplaceNode(N, New.getNode());
  return true;
}

void SystemZDAGToDAGISel::splitLargeImmediate(unsigned Opcode, SDNode *Node,
                                              SDValue Op0, uint64_t UpperVal,
                                              uint64_t LowerVal) {
  EVT VT = Node->getValueType(0);
  SDLoc DL(Node);
  SDValue Upper = CurDAG->getConstant(UpperVal, DL, VT);
  if (Op0.getNode())
    Upper = CurDAG->getNode(Opcode, DL, VT, Op0, Upper);

  {
    // When we haven't passed in Op0, Upper will be a constant. In order to
    // prevent folding back to the large immediate in `Or = getNode(...)` we run
    // SelectCode first and end up with an opaque machine node. This means that
    // we need to use a handle to keep track of Upper in case it gets CSE'd by
    // SelectCode.
    //
    // Note that in the case where Op0 is passed in we could just call
    // SelectCode(Upper) later, along with the SelectCode(Or), and avoid needing
    // the handle at all, but it's fine to do it here.
    //
    // TODO: This is a pretty hacky way to do this. Can we do something that
    // doesn't require a two paragraph explanation?
    HandleSDNode Handle(Upper);
    SelectCode(Upper.getNode());
    Upper = Handle.getValue();
  }

  SDValue Lower = CurDAG->getConstant(LowerVal, DL, VT);
  SDValue Or = CurDAG->getNode(Opcode, DL, VT, Upper, Lower);

  ReplaceNode(Node, Or.getNode());

  SelectCode(Or.getNode());
}

bool SystemZDAGToDAGISel::tryGather(SDNode *N, unsigned Opcode) {
  SDValue ElemV = N->getOperand(2);
  auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
  if (!ElemN)
    return false;

  unsigned Elem = ElemN->getZExtValue();
  EVT VT = N->getValueType(0);
  if (Elem >= VT.getVectorNumElements())
    return false;

  auto *Load = dyn_cast<LoadSDNode>(N->getOperand(1));
  if (!Load || !Load->hasNUsesOfValue(1, 0))
    return false;
  if (Load->getMemoryVT().getSizeInBits() !=
      Load->getValueType(0).getSizeInBits())
    return false;

  SDValue Base, Disp, Index;
  if (!selectBDVAddr12Only(Load->getBasePtr(), ElemV, Base, Disp, Index) ||
      Index.getValueType() != VT.changeVectorElementTypeToInteger())
    return false;

  SDLoc DL(Load);
  SDValue Ops[] = {
    N->getOperand(0), Base, Disp, Index,
    CurDAG->getTargetConstant(Elem, DL, MVT::i32), Load->getChain()
  };
  SDNode *Res = CurDAG->getMachineNode(Opcode, DL, VT, MVT::Other, Ops);
  ReplaceUses(SDValue(Load, 1), SDValue(Res, 1));
  ReplaceNode(N, Res);
  return true;
}
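
// As an illustration (a sketch of the DAG shape, not a literal dump),
// tryGather above matches an insertion such as
//
//   (insert_vector_elt %vec,
//                      (load (add %base,
//                                 (zero_extend (extract_vector_elt %index,
//                                                                  2)))),
//                      2)
//
// with v4i32 operands and selects it to VGEF, replacing both the insertion
// and the scalar load. tryScatter below handles the mirror-image store of an
// extracted element.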

bool SystemZDAGToDAGISel::tryScatter(StoreSDNode *Store, unsigned Opcode) {
  SDValue Value = Store->getValue();
  if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
    return false;
  if (Store->getMemoryVT().getSizeInBits() != Value.getValueSizeInBits())
    return false;

  SDValue ElemV = Value.getOperand(1);
  auto *ElemN = dyn_cast<ConstantSDNode>(ElemV);
  if (!ElemN)
    return false;

  SDValue Vec = Value.getOperand(0);
  EVT VT = Vec.getValueType();
  unsigned Elem = ElemN->getZExtValue();
  if (Elem >= VT.getVectorNumElements())
    return false;

  SDValue Base, Disp, Index;
  if (!selectBDVAddr12Only(Store->getBasePtr(), ElemV, Base, Disp, Index) ||
      Index.getValueType() != VT.changeVectorElementTypeToInteger())
    return false;

  SDLoc DL(Store);
  SDValue Ops[] = {
    Vec, Base, Disp, Index, CurDAG->getTargetConstant(Elem, DL, MVT::i32),
    Store->getChain()
  };
  ReplaceNode(Store, CurDAG->getMachineNode(Opcode, DL, MVT::Other, Ops));
  return true;
}

// Check whether or not the chain ending in StoreNode is suitable for the
// {load; op; store} read-modify-write transformation.
static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
                                        SDValue StoredVal, SelectionDAG *CurDAG,
                                        LoadSDNode *&LoadNode,
                                        SDValue &InputChain) {
  // Is the stored value result 0 of the operation?
  if (StoredVal.getResNo() != 0)
    return false;

  // Are there other uses of the operation than the store?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0))
    return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(0);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode()))
    return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);

  // Is store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  // Check if the chain is produced by the load or is a TokenFactor with
  // the load output chain as an operand. Return InputChain by reference.
  SDValue Chain = StoreNode->getChain();

  bool ChainCheck = false;
  if (Chain == Load.getValue(1)) {
    ChainCheck = true;
    InputChain = LoadNode->getChain();
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    SmallVector<SDValue, 4> ChainOps;
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        ChainCheck = true;
        // Drop Load, but keep its chain. No cycle check necessary.
        ChainOps.push_back(Load.getOperand(0));
        continue;
      }

      // Make sure using Op as part of the chain would not cause a cycle here.
      // In theory, we could check whether the chain node is a predecessor of
      // the load. But that can be very expensive. Instead visit the uses and
      // make sure they all have smaller node id than the load.
      int LoadId = LoadNode->getNodeId();
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = UI->use_end(); UI != UE; ++UI) {
        if (UI.getUse().getResNo() != 0)
          continue;
        if (UI->getNodeId() > LoadId)
          return false;
      }

      ChainOps.push_back(Op);
    }

    if (ChainCheck)
      // Make a new TokenFactor with all the other input chains except
      // for the load.
      InputChain = CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain),
                                   MVT::Other, ChainOps);
  }
  if (!ChainCheck)
    return false;

  return true;
}

// Change a chain of {load; op; store} of the same value into a simple op
// through memory of that value, if the uses of the modified value and its
// address are suitable.
//
// The tablegen memory operand pattern is currently not able to match the
// case where the CC result of the original operation is used.
//
// See the equivalent routine in X86ISelDAGToDAG for further comments.
bool SystemZDAGToDAGISel::tryFoldLoadStoreIntoMemOperand(SDNode *Node) {
  StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
  SDValue StoredVal = StoreNode->getOperand(1);
  unsigned Opc = StoredVal->getOpcode();
  SDLoc DL(StoreNode);

  // Before we try to select anything, make sure this is a memory operand size
  // and opcode we can handle. Note that this must match the code below that
  // actually lowers the opcodes.
  EVT MemVT = StoreNode->getMemoryVT();
  unsigned NewOpc = 0;
  bool NegateOperand = false;
  switch (Opc) {
  default:
    return false;
  case SystemZISD::SSUBO:
    NegateOperand = true;
    LLVM_FALLTHROUGH;
  case SystemZISD::SADDO:
    if (MemVT == MVT::i32)
      NewOpc = SystemZ::ASI;
    else if (MemVT == MVT::i64)
      NewOpc = SystemZ::AGSI;
    else
      return false;
    break;
  case SystemZISD::USUBO:
    NegateOperand = true;
    LLVM_FALLTHROUGH;
  case SystemZISD::UADDO:
    if (MemVT == MVT::i32)
      NewOpc = SystemZ::ALSI;
    else if (MemVT == MVT::i64)
      NewOpc = SystemZ::ALGSI;
    else
      return false;
    break;
  }

  LoadSDNode *LoadNode = nullptr;
  SDValue InputChain;
  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadNode,
                                   InputChain))
    return false;

  SDValue Operand = StoredVal.getOperand(1);
  auto *OperandC = dyn_cast<ConstantSDNode>(Operand);
  if (!OperandC)
    return false;
  auto OperandV = OperandC->getAPIntValue();
  if (NegateOperand)
    OperandV = -OperandV;
  if (OperandV.getMinSignedBits() > 8)
    return false;
  Operand = CurDAG->getTargetConstant(OperandV, DL, MemVT);

  SDValue Base, Disp;
  if (!selectBDAddr20Only(StoreNode->getBasePtr(), Base, Disp))
    return false;

  SDValue Ops[] = { Base, Disp, Operand, InputChain };
  MachineSDNode *Result =
    CurDAG->getMachineNode(NewOpc, DL, MVT::i32, MVT::Other, Ops);
  CurDAG->setNodeMemRefs(
      Result, {StoreNode->getMemOperand(), LoadNode->getMemOperand()});

  ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
  ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
  CurDAG->RemoveDeadNode(Node);
  return true;
}
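
// As a concrete illustration of the fold above: a chain of the form
//
//   (store (SystemZISD::SADDO (load %ptr), 1), %ptr)
//
// where the loaded value and its sum have no other users can be selected to a
// single ASI (add signed immediate to storage), conceptually "asi 0(%rP), 1".
// The unsigned variants map to ALSI/ALGSI, and subtractions are handled by
// negating the immediate.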

bool SystemZDAGToDAGISel::canUseBlockOperation(StoreSDNode *Store,
                                               LoadSDNode *Load) const {
  // Check that the two memory operands have the same size.
  if (Load->getMemoryVT() != Store->getMemoryVT())
    return false;

  // Volatility stops an access from being decomposed.
  if (Load->isVolatile() || Store->isVolatile())
    return false;

  // There's no chance of overlap if the load is invariant.
  if (Load->isInvariant() && Load->isDereferenceable())
    return true;

  // Otherwise we need to check whether there's an alias.
  const Value *V1 = Load->getMemOperand()->getValue();
  const Value *V2 = Store->getMemOperand()->getValue();
  if (!V1 || !V2)
    return false;

  // Reject equality.
  uint64_t Size = Load->getMemoryVT().getStoreSize();
  int64_t End1 = Load->getSrcValueOffset() + Size;
  int64_t End2 = Store->getSrcValueOffset() + Size;
  if (V1 == V2 && End1 == End2)
    return false;

  return !AA->alias(MemoryLocation(V1, End1, Load->getAAInfo()),
                    MemoryLocation(V2, End2, Store->getAAInfo()));
}

bool SystemZDAGToDAGISel::storeLoadCanUseMVC(SDNode *N) const {
  auto *Store = cast<StoreSDNode>(N);
  auto *Load = cast<LoadSDNode>(Store->getValue());

  // Prefer not to use MVC if either address can use ... RELATIVE LONG
  // instructions.
  uint64_t Size = Load->getMemoryVT().getStoreSize();
  if (Size > 1 && Size <= 8) {
    // Prefer LHRL, LRL and LGRL.
    if (SystemZISD::isPCREL(Load->getBasePtr().getOpcode()))
      return false;
    // Prefer STHRL, STRL and STGRL.
    if (SystemZISD::isPCREL(Store->getBasePtr().getOpcode()))
      return false;
  }

  return canUseBlockOperation(Store, Load);
}

bool SystemZDAGToDAGISel::storeLoadCanUseBlockBinary(SDNode *N,
                                                     unsigned I) const {
  auto *StoreA = cast<StoreSDNode>(N);
  auto *LoadA = cast<LoadSDNode>(StoreA->getValue().getOperand(1 - I));
  auto *LoadB = cast<LoadSDNode>(StoreA->getValue().getOperand(I));
  return !LoadA->isVolatile() && canUseBlockOperation(StoreA, LoadB);
}

void SystemZDAGToDAGISel::Select(SDNode *Node) {
  // If we have a custom node, we already have selected!
  if (Node->isMachineOpcode()) {
    LLVM_DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n");
    Node->setNodeId(-1);
    return;
  }

  unsigned Opcode = Node->getOpcode();
  switch (Opcode) {
  case ISD::OR:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::ROSBG))
        return;
    goto or_xor;

  case ISD::XOR:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::RXSBG))
        return;
    // Fall through.
  or_xor:
    // If this is a 64-bit operation in which both 32-bit halves are nonzero,
    // split the operation into two. If both operands here happen to be
    // constant, leave this to common code to optimize.
    if (Node->getValueType(0) == MVT::i64 &&
        Node->getOperand(0).getOpcode() != ISD::Constant)
      if (auto *Op1 = dyn_cast<ConstantSDNode>(Node->getOperand(1))) {
        uint64_t Val = Op1->getZExtValue();
        if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val)) {
          splitLargeImmediate(Opcode, Node, Node->getOperand(0),
                              Val - uint32_t(Val), uint32_t(Val));
          return;
        }
      }
    break;

  case ISD::AND:
    if (Node->getOperand(1).getOpcode() != ISD::Constant)
      if (tryRxSBG(Node, SystemZ::RNSBG))
        return;
    LLVM_FALLTHROUGH;
  case ISD::ROTL:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::ZERO_EXTEND:
    if (tryRISBGZero(Node))
      return;
    break;

  case ISD::Constant:
    // If this is a 64-bit constant that is out of the range of LLILF,
    // LLIHF and LGFI, split it into two 32-bit pieces.
    if (Node->getValueType(0) == MVT::i64) {
      uint64_t Val = cast<ConstantSDNode>(Node)->getZExtValue();
      if (!SystemZ::isImmLF(Val) && !SystemZ::isImmHF(Val) && !isInt<32>(Val)) {
        splitLargeImmediate(ISD::OR, Node, SDValue(), Val - uint32_t(Val),
                            uint32_t(Val));
        return;
      }
    }
    break;

  case SystemZISD::SELECT_CCMASK: {
    SDValue Op0 = Node->getOperand(0);
    SDValue Op1 = Node->getOperand(1);
    // Prefer to put any load first, so that it can be matched as a
    // conditional load. Likewise for constants in range for LOCHI.
    if ((Op1.getOpcode() == ISD::LOAD && Op0.getOpcode() != ISD::LOAD) ||
        (Subtarget->hasLoadStoreOnCond2() &&
         Node->getValueType(0).isInteger() &&
         Op1.getOpcode() == ISD::Constant &&
         isInt<16>(cast<ConstantSDNode>(Op1)->getSExtValue()) &&
         !(Op0.getOpcode() == ISD::Constant &&
           isInt<16>(cast<ConstantSDNode>(Op0)->getSExtValue())))) {
      SDValue CCValid = Node->getOperand(2);
      SDValue CCMask = Node->getOperand(3);
      uint64_t ConstCCValid =
          cast<ConstantSDNode>(CCValid.getNode())->getZExtValue();
      uint64_t ConstCCMask =
          cast<ConstantSDNode>(CCMask.getNode())->getZExtValue();
      // Invert the condition.
      CCMask = CurDAG->getConstant(ConstCCValid ^ ConstCCMask, SDLoc(Node),
                                   CCMask.getValueType());
      SDValue Op4 = Node->getOperand(4);
      SDNode *UpdatedNode =
          CurDAG->UpdateNodeOperands(Node, Op1, Op0, CCValid, CCMask, Op4);
      if (UpdatedNode != Node) {
        // In case this node already exists then replace Node with it.
        ReplaceNode(Node, UpdatedNode);
        Node = UpdatedNode;
      }
    }
    break;
  }

  case ISD::INSERT_VECTOR_ELT: {
    EVT VT = Node->getValueType(0);
    unsigned ElemBitSize = VT.getScalarSizeInBits();
    if (ElemBitSize == 32) {
      if (tryGather(Node, SystemZ::VGEF))
        return;
    } else if (ElemBitSize == 64) {
      if (tryGather(Node, SystemZ::VGEG))
        return;
    }
    break;
  }

  case ISD::STORE: {
    if (tryFoldLoadStoreIntoMemOperand(Node))
      return;
    auto *Store = cast<StoreSDNode>(Node);
    unsigned ElemBitSize = Store->getValue().getValueSizeInBits();
    if (ElemBitSize == 32) {
      if (tryScatter(Store, SystemZ::VSCEF))
        return;
    } else if (ElemBitSize == 64) {
      if (tryScatter(Store, SystemZ::VSCEG))
        return;
    }
    break;
  }
  }

  SelectCode(Node);
}

bool SystemZDAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op,
                             unsigned ConstraintID,
                             std::vector<SDValue> &OutOps) {
  SystemZAddressingMode::AddrForm Form;
  SystemZAddressingMode::DispRange DispRange;
  SDValue Base, Disp, Index;

  switch(ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::Constraint_i:
  case InlineAsm::Constraint_Q:
    // Accept an address with a short displacement, but no index.
    Form = SystemZAddressingMode::FormBD;
    DispRange = SystemZAddressingMode::Disp12Only;
    break;
  case InlineAsm::Constraint_R:
    // Accept an address with a short displacement and an index.
    Form = SystemZAddressingMode::FormBDXNormal;
    DispRange = SystemZAddressingMode::Disp12Only;
    break;
  case InlineAsm::Constraint_S:
    // Accept an address with a long displacement, but no index.
    Form = SystemZAddressingMode::FormBD;
    DispRange = SystemZAddressingMode::Disp20Only;
    break;
  case InlineAsm::Constraint_T:
  case InlineAsm::Constraint_m:
  case InlineAsm::Constraint_o:
    // Accept an address with a long displacement and an index.
    // m works the same as T, as this is the most general case.
    // We don't really have any special handling of "offsettable"
    // memory addresses, so just treat o the same as m.
    Form = SystemZAddressingMode::FormBDXNormal;
    DispRange = SystemZAddressingMode::Disp20Only;
    break;
  }

  if (selectBDXAddr(Form, DispRange, Op, Base, Disp, Index)) {
    const TargetRegisterClass *TRC =
        Subtarget->getRegisterInfo()->getPointerRegClass(*MF);
    SDLoc DL(Base);
    SDValue RC = CurDAG->getTargetConstant(TRC->getID(), DL, MVT::i32);

    // Make sure that the base address doesn't go into %r0.
    // If it's a TargetFrameIndex or a fixed register, we shouldn't do anything.
    if (Base.getOpcode() != ISD::TargetFrameIndex &&
        Base.getOpcode() != ISD::Register) {
      Base =
          SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                         DL, Base.getValueType(),
                                         Base, RC), 0);
    }

    // Make sure that the index register isn't assigned to %r0 either.
    if (Index.getOpcode() != ISD::Register) {
      Index =
          SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                         DL, Index.getValueType(),
                                         Index, RC), 0);
    }

    OutOps.push_back(Base);
    OutOps.push_back(Disp);
    OutOps.push_back(Index);
    return false;
  }

  return true;
}

// IsProfitableToFold - Returns true if it is profitable to fold the specific
// operand node N of U during instruction selection that starts at Root.
bool
SystemZDAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U,
                                        SDNode *Root) const {
  // We want to avoid folding a LOAD into an ICMP node if as a result
  // we would be forced to spill the condition code into a GPR.
  if (N.getOpcode() == ISD::LOAD && U->getOpcode() == SystemZISD::ICMP) {
    if (!N.hasOneUse() || !U->hasOneUse())
      return false;

    // The user of the CC value will usually be a CopyToReg into the
    // physical CC register, which in turn is glued and chained to the
    // actual instruction that uses the CC value. Bail out if we have
    // anything other than that.
    SDNode *CCUser = *U->use_begin();
    SDNode *CCRegUser = nullptr;
    if (CCUser->getOpcode() == ISD::CopyToReg &&
        cast<RegisterSDNode>(CCUser->getOperand(1))->getReg() == SystemZ::CC) {
      for (auto *U : CCUser->uses()) {
        if (CCRegUser == nullptr)
          CCRegUser = U;
        else if (CCRegUser != U)
          return false;
      }
    }
    if (CCRegUser == nullptr)
      return false;

    // If the actual instruction is a branch, the only thing that remains to be
    // checked is whether the CCUser chain is a predecessor of the load.
    if (CCRegUser->isMachineOpcode() &&
        CCRegUser->getMachineOpcode() == SystemZ::BRC)
      return !N->isPredecessorOf(CCUser->getOperand(0).getNode());

    // Otherwise, the instruction may have multiple operands, and we need to
    // verify that none of them are a predecessor of the load. This is exactly
    // the same check that would be done by common code if the CC setter were
    // glued to the CC user, so simply invoke that check here.
    if (!IsLegalToFold(N, U, CCRegUser, OptLevel, false))
      return false;
  }

  return true;
}

namespace {
// Represents a sequence for extracting a 0/1 value from an IPM result:
// (((X ^ XORValue) + AddValue) >> Bit)
struct IPMConversion {
  IPMConversion(unsigned xorValue, int64_t addValue, unsigned bit)
      : XORValue(xorValue), AddValue(addValue), Bit(bit) {}

  int64_t XORValue;
  int64_t AddValue;
  unsigned Bit;
};
} // end anonymous namespace

// Return a sequence for getting a 1 from an IPM result when CC has a
// value in CCMask and a 0 when CC has a value in CCValid & ~CCMask.
// The handling of CC values outside CCValid doesn't matter.
static IPMConversion getIPMConversion(unsigned CCValid, unsigned CCMask) {
  // Deal with cases where the result can be taken directly from a bit
  // of the IPM result.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC);
  if (CCMask == (CCValid & (SystemZ::CCMASK_2 | SystemZ::CCMASK_3)))
    return IPMConversion(0, 0, SystemZ::IPM_CC + 1);

  // Deal with cases where we can add a value to force the sign bit
  // to contain the right value. Putting the bit in 31 means we can
  // use SRL rather than RISBG(L), and also makes it easier to get a
  // 0/-1 value, so it has priority over the other tests below.
  //
  // These sequences rely on the fact that the upper two bits of the
  // IPM result are zero.
  uint64_t TopBit = uint64_t(1) << 31;
  if (CCMask == (CCValid & SystemZ::CCMASK_0))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_1)))
    return IPMConversion(0, -(2 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2)))
    return IPMConversion(0, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_3))
    return IPMConversion(0, TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_1
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(0, TopBit - (1 << SystemZ::IPM_CC), 31);

  // Next try inverting the value and testing a bit. 0/1 could be
  // handled this way too, but we dealt with that case above.
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_2)))
    return IPMConversion(-1, 0, SystemZ::IPM_CC);

  // Handle cases where adding a value forces a non-sign bit to contain
  // the right value.
  if (CCMask == (CCValid & (SystemZ::CCMASK_1 | SystemZ::CCMASK_2)))
    return IPMConversion(0, 1 << SystemZ::IPM_CC, SystemZ::IPM_CC + 1);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0 | SystemZ::CCMASK_3)))
    return IPMConversion(0, -(1 << SystemZ::IPM_CC), SystemZ::IPM_CC + 1);

  // The remaining cases are 1, 2, 0/1/3 and 0/2/3. All of these are
  // handled by inverting the low CC bit and applying one of the
  // sign-based extractions above.
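  // (Illustrative example, not from the original source: for CCMask ==
  // CCMASK_1, XORing with (1 << SystemZ::IPM_CC) flips the low bit of the
  // CC field, swapping CC values 0<->1 and 2<->3. "1 exactly when CC was 1"
  // then becomes "1 exactly when the inverted CC is 0", which is the
  // CCMASK_0 recipe used above: add -(1 << IPM_CC) and take bit 31.)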
  if (CCMask == (CCValid & SystemZ::CCMASK_1))
    return IPMConversion(1 << SystemZ::IPM_CC, -(1 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & SystemZ::CCMASK_2))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_1
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC, -(3 << SystemZ::IPM_CC), 31);
  if (CCMask == (CCValid & (SystemZ::CCMASK_0
                            | SystemZ::CCMASK_2
                            | SystemZ::CCMASK_3)))
    return IPMConversion(1 << SystemZ::IPM_CC,
                         TopBit - (1 << SystemZ::IPM_CC), 31);

  llvm_unreachable("Unexpected CC combination");
}

SDValue SystemZDAGToDAGISel::expandSelectBoolean(SDNode *Node) {
  auto *TrueOp = dyn_cast<ConstantSDNode>(Node->getOperand(0));
  auto *FalseOp = dyn_cast<ConstantSDNode>(Node->getOperand(1));
  if (!TrueOp || !FalseOp)
    return SDValue();
  if (FalseOp->getZExtValue() != 0)
    return SDValue();
  if (TrueOp->getSExtValue() != 1 && TrueOp->getSExtValue() != -1)
    return SDValue();

  auto *CCValidOp = dyn_cast<ConstantSDNode>(Node->getOperand(2));
  auto *CCMaskOp = dyn_cast<ConstantSDNode>(Node->getOperand(3));
  if (!CCValidOp || !CCMaskOp)
    return SDValue();
  int CCValid = CCValidOp->getZExtValue();
  int CCMask = CCMaskOp->getZExtValue();

  SDLoc DL(Node);
  SDValue CCReg = Node->getOperand(4);
  IPMConversion IPM = getIPMConversion(CCValid, CCMask);
  SDValue Result = CurDAG->getNode(SystemZISD::IPM, DL, MVT::i32, CCReg);

  if (IPM.XORValue)
    Result = CurDAG->getNode(ISD::XOR, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.XORValue, DL, MVT::i32));

  if (IPM.AddValue)
    Result = CurDAG->getNode(ISD::ADD, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.AddValue, DL, MVT::i32));

  EVT VT = Node->getValueType(0);
  if (VT == MVT::i32 && IPM.Bit == 31) {
    unsigned ShiftOp = TrueOp->getSExtValue() == 1 ? ISD::SRL : ISD::SRA;
    Result = CurDAG->getNode(ShiftOp, DL, MVT::i32, Result,
                             CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
  } else {
    if (VT != MVT::i32)
      Result = CurDAG->getNode(ISD::ANY_EXTEND, DL, VT, Result);

    if (TrueOp->getSExtValue() == 1) {
      // The SHR/AND sequence should get optimized to an RISBG.
      Result = CurDAG->getNode(ISD::SRL, DL, VT, Result,
                               CurDAG->getConstant(IPM.Bit, DL, MVT::i32));
      Result = CurDAG->getNode(ISD::AND, DL, VT, Result,
                               CurDAG->getConstant(1, DL, VT));
    } else {
      // Sign-extend from IPM.Bit using a pair of shifts.
      int ShlAmt = VT.getSizeInBits() - 1 - IPM.Bit;
      int SraAmt = VT.getSizeInBits() - 1;
      Result = CurDAG->getNode(ISD::SHL, DL, VT, Result,
                               CurDAG->getConstant(ShlAmt, DL, MVT::i32));
      Result = CurDAG->getNode(ISD::SRA, DL, VT, Result,
                               CurDAG->getConstant(SraAmt, DL, MVT::i32));
    }
  }

  return Result;
}

void SystemZDAGToDAGISel::PreprocessISelDAG() {
  // If we have conditional immediate loads, we always prefer
  // using those over an IPM sequence.
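  // (Illustrative note, not from the original source: with the
  // load/store-on-condition-2 facility, a boolean SELECT_CCMASK can be
  // selected as a conditional immediate load such as LOCHI, so expanding
  // it into an IPM/shift sequence here would be counter-productive.)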
  if (Subtarget->hasLoadStoreOnCond2())
    return;

  bool MadeChange = false;

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
                                       E = CurDAG->allnodes_end();
       I != E;) {
    SDNode *N = &*I++;
    if (N->use_empty())
      continue;

    SDValue Res;
    switch (N->getOpcode()) {
    default: break;
    case SystemZISD::SELECT_CCMASK:
      Res = expandSelectBoolean(N);
      break;
    }

    if (Res) {
      LLVM_DEBUG(dbgs() << "SystemZ DAG preprocessing replacing:\nOld: ");
      LLVM_DEBUG(N->dump(CurDAG));
      LLVM_DEBUG(dbgs() << "\nNew: ");
      LLVM_DEBUG(Res.getNode()->dump(CurDAG));
      LLVM_DEBUG(dbgs() << "\n");

      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      MadeChange = true;
    }
  }

  if (MadeChange)
    CurDAG->RemoveDeadNodes();
}