1 //===-- DAGCombiner.cpp - Implement a DAG node combiner -------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This pass combines dag nodes to form fewer, simpler DAG nodes. It can be run 11 // both before and after the DAG is legalized. 12 // 13 // This pass is not a substitute for the LLVM IR instcombine pass. This pass is 14 // primarily intended to handle simplification opportunities that are implicit 15 // in the LLVM IR and exposed by the various codegen lowering phases. 16 // 17 //===----------------------------------------------------------------------===// 18 19 #include "llvm/ADT/SetVector.h" 20 #include "llvm/ADT/SmallBitVector.h" 21 #include "llvm/ADT/SmallPtrSet.h" 22 #include "llvm/ADT/SmallSet.h" 23 #include "llvm/ADT/Statistic.h" 24 #include "llvm/Analysis/AliasAnalysis.h" 25 #include "llvm/CodeGen/MachineFrameInfo.h" 26 #include "llvm/CodeGen/MachineFunction.h" 27 #include "llvm/CodeGen/SelectionDAG.h" 28 #include "llvm/CodeGen/SelectionDAGTargetInfo.h" 29 #include "llvm/IR/DataLayout.h" 30 #include "llvm/IR/DerivedTypes.h" 31 #include "llvm/IR/Function.h" 32 #include "llvm/IR/LLVMContext.h" 33 #include "llvm/Support/CommandLine.h" 34 #include "llvm/Support/Debug.h" 35 #include "llvm/Support/ErrorHandling.h" 36 #include "llvm/Support/MathExtras.h" 37 #include "llvm/Support/raw_ostream.h" 38 #include "llvm/Target/TargetLowering.h" 39 #include "llvm/Target/TargetOptions.h" 40 #include "llvm/Target/TargetRegisterInfo.h" 41 #include "llvm/Target/TargetSubtargetInfo.h" 42 #include <algorithm> 43 using namespace llvm; 44 45 #define DEBUG_TYPE "dagcombine" 46 47 STATISTIC(NodesCombined , "Number of dag nodes combined"); 48 STATISTIC(PreIndexedNodes , "Number of pre-indexed nodes created"); 49 STATISTIC(PostIndexedNodes, "Number of post-indexed nodes created"); 50 STATISTIC(OpsNarrowed , "Number of load/op/store narrowed"); 51 STATISTIC(LdStFP2Int , "Number of fp load/store pairs transformed to int"); 52 STATISTIC(SlicedLoads, "Number of load sliced"); 53 54 namespace { 55 static cl::opt<bool> 56 CombinerGlobalAA("combiner-global-alias-analysis", cl::Hidden, 57 cl::desc("Enable DAG combiner's use of IR alias analysis")); 58 59 static cl::opt<bool> 60 UseTBAA("combiner-use-tbaa", cl::Hidden, cl::init(true), 61 cl::desc("Enable DAG combiner's use of TBAA")); 62 63 #ifndef NDEBUG 64 static cl::opt<std::string> 65 CombinerAAOnlyFunc("combiner-aa-only-func", cl::Hidden, 66 cl::desc("Only use DAG-combiner alias analysis in this" 67 " function")); 68 #endif 69 70 /// Hidden option to stress test load slicing, i.e., when this option 71 /// is enabled, load slicing bypasses most of its profitability guards. 
72 static cl::opt<bool> 73 StressLoadSlicing("combiner-stress-load-slicing", cl::Hidden, 74 cl::desc("Bypass the profitability model of load " 75 "slicing"), 76 cl::init(false)); 77 78 static cl::opt<bool> 79 MaySplitLoadIndex("combiner-split-load-index", cl::Hidden, cl::init(true), 80 cl::desc("DAG combiner may split indexing from loads")); 81 82 //------------------------------ DAGCombiner ---------------------------------// 83 84 class DAGCombiner { 85 SelectionDAG &DAG; 86 const TargetLowering &TLI; 87 CombineLevel Level; 88 CodeGenOpt::Level OptLevel; 89 bool LegalOperations; 90 bool LegalTypes; 91 bool ForCodeSize; 92 93 /// \brief Worklist of all of the nodes that need to be simplified. 94 /// 95 /// This must behave as a stack -- new nodes to process are pushed onto the 96 /// back and when processing we pop off of the back. 97 /// 98 /// The worklist will not contain duplicates but may contain null entries 99 /// due to nodes being deleted from the underlying DAG. 100 SmallVector<SDNode *, 64> Worklist; 101 102 /// \brief Mapping from an SDNode to its position on the worklist. 103 /// 104 /// This is used to find and remove nodes from the worklist (by nulling 105 /// them) when they are deleted from the underlying DAG. It relies on 106 /// stable indices of nodes within the worklist. 107 DenseMap<SDNode *, unsigned> WorklistMap; 108 109 /// \brief Set of nodes which have been combined (at least once). 110 /// 111 /// This is used to allow us to reliably add any operands of a DAG node 112 /// which have not yet been combined to the worklist. 113 SmallPtrSet<SDNode *, 32> CombinedNodes; 114 115 // AA - Used for DAG load/store alias analysis. 116 AliasAnalysis &AA; 117 118 /// When an instruction is simplified, add all users of the instruction to 119 /// the work lists because they might get more simplified now. 120 void AddUsersToWorklist(SDNode *N) { 121 for (SDNode *Node : N->uses()) 122 AddToWorklist(Node); 123 } 124 125 /// Call the node-specific routine that folds each particular type of node. 126 SDValue visit(SDNode *N); 127 128 public: 129 /// Add to the worklist making sure its instance is at the back (next to be 130 /// processed.) 131 void AddToWorklist(SDNode *N) { 132 assert(N->getOpcode() != ISD::DELETED_NODE && 133 "Deleted Node added to Worklist"); 134 135 // Skip handle nodes as they can't usefully be combined and confuse the 136 // zero-use deletion strategy. 137 if (N->getOpcode() == ISD::HANDLENODE) 138 return; 139 140 if (WorklistMap.insert(std::make_pair(N, Worklist.size())).second) 141 Worklist.push_back(N); 142 } 143 144 /// Remove all instances of N from the worklist. 145 void removeFromWorklist(SDNode *N) { 146 CombinedNodes.erase(N); 147 148 auto It = WorklistMap.find(N); 149 if (It == WorklistMap.end()) 150 return; // Not in the worklist. 151 152 // Null out the entry rather than erasing it to avoid a linear operation. 153 Worklist[It->second] = nullptr; 154 WorklistMap.erase(It); 155 } 156 157 void deleteAndRecombine(SDNode *N); 158 bool recursivelyDeleteUnusedNodes(SDNode *N); 159 160 /// Replaces all uses of the results of one DAG node with new values. 161 SDValue CombineTo(SDNode *N, const SDValue *To, unsigned NumTo, 162 bool AddTo = true); 163 164 /// Replaces all uses of the results of one DAG node with new values. 165 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true) { 166 return CombineTo(N, &Res, 1, AddTo); 167 } 168 169 /// Replaces all uses of the results of one DAG node with new values. 
170 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, 171 bool AddTo = true) { 172 SDValue To[] = { Res0, Res1 }; 173 return CombineTo(N, To, 2, AddTo); 174 } 175 176 void CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO); 177 178 private: 179 unsigned MaximumLegalStoreInBits; 180 181 /// Check the specified integer node value to see if it can be simplified or 182 /// if things it uses can be simplified by bit propagation. 183 /// If so, return true. 184 bool SimplifyDemandedBits(SDValue Op) { 185 unsigned BitWidth = Op.getScalarValueSizeInBits(); 186 APInt Demanded = APInt::getAllOnesValue(BitWidth); 187 return SimplifyDemandedBits(Op, Demanded); 188 } 189 190 bool SimplifyDemandedBits(SDValue Op, const APInt &Demanded); 191 192 bool CombineToPreIndexedLoadStore(SDNode *N); 193 bool CombineToPostIndexedLoadStore(SDNode *N); 194 SDValue SplitIndexingFromLoad(LoadSDNode *LD); 195 bool SliceUpLoad(SDNode *N); 196 197 /// \brief Replace an ISD::EXTRACT_VECTOR_ELT of a load with a narrowed 198 /// load. 199 /// 200 /// \param EVE ISD::EXTRACT_VECTOR_ELT to be replaced. 201 /// \param InVecVT type of the input vector to EVE with bitcasts resolved. 202 /// \param EltNo index of the vector element to load. 203 /// \param OriginalLoad load that EVE came from to be replaced. 204 /// \returns EVE on success SDValue() on failure. 205 SDValue ReplaceExtractVectorEltOfLoadWithNarrowedLoad( 206 SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad); 207 void ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad); 208 SDValue PromoteOperand(SDValue Op, EVT PVT, bool &Replace); 209 SDValue SExtPromoteOperand(SDValue Op, EVT PVT); 210 SDValue ZExtPromoteOperand(SDValue Op, EVT PVT); 211 SDValue PromoteIntBinOp(SDValue Op); 212 SDValue PromoteIntShiftOp(SDValue Op); 213 SDValue PromoteExtend(SDValue Op); 214 bool PromoteLoad(SDValue Op); 215 216 void ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs, SDValue Trunc, 217 SDValue ExtLoad, const SDLoc &DL, 218 ISD::NodeType ExtType); 219 220 /// Call the node-specific routine that knows how to fold each 221 /// particular type of node. If that doesn't do anything, try the 222 /// target-specific DAG combines. 223 SDValue combine(SDNode *N); 224 225 // Visitation implementation - Implement dag node combining for different 226 // node types. The semantics are as follows: 227 // Return Value: 228 // SDValue.getNode() == 0 - No change was made 229 // SDValue.getNode() == N - N was replaced, is dead and has been handled. 230 // otherwise - N should be replaced by the returned Operand. 
231 // 232 SDValue visitTokenFactor(SDNode *N); 233 SDValue visitMERGE_VALUES(SDNode *N); 234 SDValue visitADD(SDNode *N); 235 SDValue visitADDLike(SDValue N0, SDValue N1, SDNode *LocReference); 236 SDValue visitSUB(SDNode *N); 237 SDValue visitADDC(SDNode *N); 238 SDValue visitUADDO(SDNode *N); 239 SDValue visitSUBC(SDNode *N); 240 SDValue visitUSUBO(SDNode *N); 241 SDValue visitADDE(SDNode *N); 242 SDValue visitSUBE(SDNode *N); 243 SDValue visitMUL(SDNode *N); 244 SDValue useDivRem(SDNode *N); 245 SDValue visitSDIV(SDNode *N); 246 SDValue visitUDIV(SDNode *N); 247 SDValue visitREM(SDNode *N); 248 SDValue visitMULHU(SDNode *N); 249 SDValue visitMULHS(SDNode *N); 250 SDValue visitSMUL_LOHI(SDNode *N); 251 SDValue visitUMUL_LOHI(SDNode *N); 252 SDValue visitSMULO(SDNode *N); 253 SDValue visitUMULO(SDNode *N); 254 SDValue visitIMINMAX(SDNode *N); 255 SDValue visitAND(SDNode *N); 256 SDValue visitANDLike(SDValue N0, SDValue N1, SDNode *LocReference); 257 SDValue visitOR(SDNode *N); 258 SDValue visitORLike(SDValue N0, SDValue N1, SDNode *LocReference); 259 SDValue visitXOR(SDNode *N); 260 SDValue SimplifyVBinOp(SDNode *N); 261 SDValue visitSHL(SDNode *N); 262 SDValue visitSRA(SDNode *N); 263 SDValue visitSRL(SDNode *N); 264 SDValue visitRotate(SDNode *N); 265 SDValue visitABS(SDNode *N); 266 SDValue visitBSWAP(SDNode *N); 267 SDValue visitBITREVERSE(SDNode *N); 268 SDValue visitCTLZ(SDNode *N); 269 SDValue visitCTLZ_ZERO_UNDEF(SDNode *N); 270 SDValue visitCTTZ(SDNode *N); 271 SDValue visitCTTZ_ZERO_UNDEF(SDNode *N); 272 SDValue visitCTPOP(SDNode *N); 273 SDValue visitSELECT(SDNode *N); 274 SDValue visitVSELECT(SDNode *N); 275 SDValue visitSELECT_CC(SDNode *N); 276 SDValue visitSETCC(SDNode *N); 277 SDValue visitSETCCE(SDNode *N); 278 SDValue visitSIGN_EXTEND(SDNode *N); 279 SDValue visitZERO_EXTEND(SDNode *N); 280 SDValue visitANY_EXTEND(SDNode *N); 281 SDValue visitAssertZext(SDNode *N); 282 SDValue visitSIGN_EXTEND_INREG(SDNode *N); 283 SDValue visitSIGN_EXTEND_VECTOR_INREG(SDNode *N); 284 SDValue visitZERO_EXTEND_VECTOR_INREG(SDNode *N); 285 SDValue visitTRUNCATE(SDNode *N); 286 SDValue visitBITCAST(SDNode *N); 287 SDValue visitBUILD_PAIR(SDNode *N); 288 SDValue visitFADD(SDNode *N); 289 SDValue visitFSUB(SDNode *N); 290 SDValue visitFMUL(SDNode *N); 291 SDValue visitFMA(SDNode *N); 292 SDValue visitFDIV(SDNode *N); 293 SDValue visitFREM(SDNode *N); 294 SDValue visitFSQRT(SDNode *N); 295 SDValue visitFCOPYSIGN(SDNode *N); 296 SDValue visitSINT_TO_FP(SDNode *N); 297 SDValue visitUINT_TO_FP(SDNode *N); 298 SDValue visitFP_TO_SINT(SDNode *N); 299 SDValue visitFP_TO_UINT(SDNode *N); 300 SDValue visitFP_ROUND(SDNode *N); 301 SDValue visitFP_ROUND_INREG(SDNode *N); 302 SDValue visitFP_EXTEND(SDNode *N); 303 SDValue visitFNEG(SDNode *N); 304 SDValue visitFABS(SDNode *N); 305 SDValue visitFCEIL(SDNode *N); 306 SDValue visitFTRUNC(SDNode *N); 307 SDValue visitFFLOOR(SDNode *N); 308 SDValue visitFMINNUM(SDNode *N); 309 SDValue visitFMAXNUM(SDNode *N); 310 SDValue visitBRCOND(SDNode *N); 311 SDValue visitBR_CC(SDNode *N); 312 SDValue visitLOAD(SDNode *N); 313 314 SDValue replaceStoreChain(StoreSDNode *ST, SDValue BetterChain); 315 SDValue replaceStoreOfFPConstant(StoreSDNode *ST); 316 317 SDValue visitSTORE(SDNode *N); 318 SDValue visitINSERT_VECTOR_ELT(SDNode *N); 319 SDValue visitEXTRACT_VECTOR_ELT(SDNode *N); 320 SDValue visitBUILD_VECTOR(SDNode *N); 321 SDValue visitCONCAT_VECTORS(SDNode *N); 322 SDValue visitEXTRACT_SUBVECTOR(SDNode *N); 323 SDValue visitVECTOR_SHUFFLE(SDNode *N); 324 SDValue 
visitSCALAR_TO_VECTOR(SDNode *N); 325 SDValue visitINSERT_SUBVECTOR(SDNode *N); 326 SDValue visitMLOAD(SDNode *N); 327 SDValue visitMSTORE(SDNode *N); 328 SDValue visitMGATHER(SDNode *N); 329 SDValue visitMSCATTER(SDNode *N); 330 SDValue visitFP_TO_FP16(SDNode *N); 331 SDValue visitFP16_TO_FP(SDNode *N); 332 333 SDValue visitFADDForFMACombine(SDNode *N); 334 SDValue visitFSUBForFMACombine(SDNode *N); 335 SDValue visitFMULForFMADistributiveCombine(SDNode *N); 336 337 SDValue XformToShuffleWithZero(SDNode *N); 338 SDValue ReassociateOps(unsigned Opc, const SDLoc &DL, SDValue LHS, 339 SDValue RHS); 340 341 SDValue visitShiftByConstant(SDNode *N, ConstantSDNode *Amt); 342 343 SDValue foldSelectOfConstants(SDNode *N); 344 SDValue foldBinOpIntoSelect(SDNode *BO); 345 bool SimplifySelectOps(SDNode *SELECT, SDValue LHS, SDValue RHS); 346 SDValue SimplifyBinOpWithSameOpcodeHands(SDNode *N); 347 SDValue SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1, SDValue N2); 348 SDValue SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1, 349 SDValue N2, SDValue N3, ISD::CondCode CC, 350 bool NotExtCompare = false); 351 SDValue foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0, SDValue N1, 352 SDValue N2, SDValue N3, ISD::CondCode CC); 353 SDValue foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1, 354 const SDLoc &DL); 355 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1, ISD::CondCode Cond, 356 const SDLoc &DL, bool foldBooleans = true); 357 358 bool isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS, 359 SDValue &CC) const; 360 bool isOneUseSetCC(SDValue N) const; 361 362 SDValue SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp, 363 unsigned HiOp); 364 SDValue CombineConsecutiveLoads(SDNode *N, EVT VT); 365 SDValue CombineExtLoad(SDNode *N); 366 SDValue combineRepeatedFPDivisors(SDNode *N); 367 SDValue ConstantFoldBITCASTofBUILD_VECTOR(SDNode *, EVT); 368 SDValue BuildSDIV(SDNode *N); 369 SDValue BuildSDIVPow2(SDNode *N); 370 SDValue BuildUDIV(SDNode *N); 371 SDValue BuildLogBase2(SDValue Op, const SDLoc &DL); 372 SDValue BuildReciprocalEstimate(SDValue Op, SDNodeFlags *Flags); 373 SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags *Flags); 374 SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags *Flags); 375 SDValue buildSqrtEstimateImpl(SDValue Op, SDNodeFlags *Flags, bool Recip); 376 SDValue buildSqrtNROneConst(SDValue Op, SDValue Est, unsigned Iterations, 377 SDNodeFlags *Flags, bool Reciprocal); 378 SDValue buildSqrtNRTwoConst(SDValue Op, SDValue Est, unsigned Iterations, 379 SDNodeFlags *Flags, bool Reciprocal); 380 SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1, 381 bool DemandHighBits = true); 382 SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1); 383 SDNode *MatchRotatePosNeg(SDValue Shifted, SDValue Pos, SDValue Neg, 384 SDValue InnerPos, SDValue InnerNeg, 385 unsigned PosOpcode, unsigned NegOpcode, 386 const SDLoc &DL); 387 SDNode *MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL); 388 SDValue MatchLoadCombine(SDNode *N); 389 SDValue ReduceLoadWidth(SDNode *N); 390 SDValue ReduceLoadOpStoreWidth(SDNode *N); 391 SDValue splitMergedValStore(StoreSDNode *ST); 392 SDValue TransformFPLoadStorePair(SDNode *N); 393 SDValue reduceBuildVecExtToExtBuildVec(SDNode *N); 394 SDValue reduceBuildVecConvertToConvertBuildVec(SDNode *N); 395 SDValue reduceBuildVecToShuffle(SDNode *N); 396 SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N, 397 ArrayRef<int> VectorMask, SDValue VecIn1, 398 SDValue VecIn2, unsigned LeftIdx); 399 400 SDValue 
GetDemandedBits(SDValue V, const APInt &Mask); 401 402 /// Walk up chain skipping non-aliasing memory nodes, 403 /// looking for aliasing nodes and adding them to the Aliases vector. 404 void GatherAllAliases(SDNode *N, SDValue OriginalChain, 405 SmallVectorImpl<SDValue> &Aliases); 406 407 /// Return true if there is any possibility that the two addresses overlap. 408 bool isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const; 409 410 /// Walk up chain skipping non-aliasing memory nodes, looking for a better 411 /// chain (aliasing node.) 412 SDValue FindBetterChain(SDNode *N, SDValue Chain); 413 414 /// Try to replace a store and any possibly adjacent stores on 415 /// consecutive chains with better chains. Return true only if St is 416 /// replaced. 417 /// 418 /// Notice that other chains may still be replaced even if the function 419 /// returns false. 420 bool findBetterNeighborChains(StoreSDNode *St); 421 422 /// Match "(X shl/srl V1) & V2" where V2 may not be present. 423 bool MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask); 424 425 /// Holds a pointer to an LSBaseSDNode as well as information on where it 426 /// is located in a sequence of memory operations connected by a chain. 427 struct MemOpLink { 428 MemOpLink(LSBaseSDNode *N, int64_t Offset) 429 : MemNode(N), OffsetFromBase(Offset) {} 430 // Ptr to the mem node. 431 LSBaseSDNode *MemNode; 432 // Offset from the base ptr. 433 int64_t OffsetFromBase; 434 }; 435 436 /// This is a helper function for visitMUL to check the profitability 437 /// of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2). 438 /// MulNode is the original multiply, AddNode is (add x, c1), 439 /// and ConstNode is c2. 440 bool isMulAddWithConstProfitable(SDNode *MulNode, 441 SDValue &AddNode, 442 SDValue &ConstNode); 443 444 445 /// This is a helper function for visitAND and visitZERO_EXTEND. Returns 446 /// true if the (and (load x) c) pattern matches an extload. ExtVT returns 447 /// the type of the loaded value to be extended. LoadedVT returns the type 448 /// of the original loaded value. NarrowLoad returns whether the load would 449 /// need to be narrowed in order to match. 450 bool isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN, 451 EVT LoadResultTy, EVT &ExtVT, EVT &LoadedVT, 452 bool &NarrowLoad); 453 454 /// Helper function for MergeConsecutiveStores which merges the 455 /// component store chains. 456 SDValue getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes, 457 unsigned NumStores); 458 459 /// This is a helper function for MergeConsecutiveStores. When the source 460 /// elements of the consecutive stores are all constants or all extracted 461 /// vector elements, try to merge them into one larger store. 462 /// \return True if a merged store was created. 463 bool MergeStoresOfConstantsOrVecElts(SmallVectorImpl<MemOpLink> &StoreNodes, 464 EVT MemVT, unsigned NumStores, 465 bool IsConstantSrc, bool UseVector); 466 467 /// This is a helper function for MergeConsecutiveStores. 468 /// Stores that may be merged are placed in StoreNodes. 469 void getStoreMergeCandidates(StoreSDNode *St, 470 SmallVectorImpl<MemOpLink> &StoreNodes); 471 472 /// Helper function for MergeConsecutiveStores. Checks if 473 /// Candidate stores have indirect dependency through their 474 /// operands. \return True if safe to merge 475 bool checkMergeStoreCandidatesForDependencies( 476 SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores); 477 478 /// Merge consecutive store operations into a wide store. 
479 /// This optimization uses wide integers or vectors when possible. 480 /// \return number of stores that were merged into a merged store (the 481 /// affected nodes are stored as a prefix in \p StoreNodes). 482 bool MergeConsecutiveStores(StoreSDNode *N); 483 484 /// \brief Try to transform a truncation where C is a constant: 485 /// (trunc (and X, C)) -> (and (trunc X), (trunc C)) 486 /// 487 /// \p N needs to be a truncation and its first operand an AND. Other 488 /// requirements are checked by the function (e.g. that trunc is 489 /// single-use) and if missed an empty SDValue is returned. 490 SDValue distributeTruncateThroughAnd(SDNode *N); 491 492 public: 493 DAGCombiner(SelectionDAG &D, AliasAnalysis &A, CodeGenOpt::Level OL) 494 : DAG(D), TLI(D.getTargetLoweringInfo()), Level(BeforeLegalizeTypes), 495 OptLevel(OL), LegalOperations(false), LegalTypes(false), AA(A) { 496 ForCodeSize = DAG.getMachineFunction().getFunction()->optForSize(); 497 498 MaximumLegalStoreInBits = 0; 499 for (MVT VT : MVT::all_valuetypes()) 500 if (EVT(VT).isSimple() && VT != MVT::Other && 501 TLI.isTypeLegal(EVT(VT)) && 502 VT.getSizeInBits() >= MaximumLegalStoreInBits) 503 MaximumLegalStoreInBits = VT.getSizeInBits(); 504 } 505 506 /// Runs the dag combiner on all nodes in the work list 507 void Run(CombineLevel AtLevel); 508 509 SelectionDAG &getDAG() const { return DAG; } 510 511 /// Returns a type large enough to hold any valid shift amount - before type 512 /// legalization these can be huge. 513 EVT getShiftAmountTy(EVT LHSTy) { 514 assert(LHSTy.isInteger() && "Shift amount is not an integer type!"); 515 if (LHSTy.isVector()) 516 return LHSTy; 517 auto &DL = DAG.getDataLayout(); 518 return LegalTypes ? TLI.getScalarShiftAmountTy(DL, LHSTy) 519 : TLI.getPointerTy(DL); 520 } 521 522 /// This method returns true if we are running before type legalization or 523 /// if the specified VT is legal. 524 bool isTypeLegal(const EVT &VT) { 525 if (!LegalTypes) return true; 526 return TLI.isTypeLegal(VT); 527 } 528 529 /// Convenience wrapper around TargetLowering::getSetCCResultType 530 EVT getSetCCResultType(EVT VT) const { 531 return TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 532 } 533 }; 534 } 535 536 537 namespace { 538 /// This class is a DAGUpdateListener that removes any deleted 539 /// nodes from the worklist. 
540 class WorklistRemover : public SelectionDAG::DAGUpdateListener { 541 DAGCombiner &DC; 542 public: 543 explicit WorklistRemover(DAGCombiner &dc) 544 : SelectionDAG::DAGUpdateListener(dc.getDAG()), DC(dc) {} 545 546 void NodeDeleted(SDNode *N, SDNode *E) override { 547 DC.removeFromWorklist(N); 548 } 549 }; 550 } 551 552 //===----------------------------------------------------------------------===// 553 // TargetLowering::DAGCombinerInfo implementation 554 //===----------------------------------------------------------------------===// 555 556 void TargetLowering::DAGCombinerInfo::AddToWorklist(SDNode *N) { 557 ((DAGCombiner*)DC)->AddToWorklist(N); 558 } 559 560 SDValue TargetLowering::DAGCombinerInfo:: 561 CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo) { 562 return ((DAGCombiner*)DC)->CombineTo(N, &To[0], To.size(), AddTo); 563 } 564 565 SDValue TargetLowering::DAGCombinerInfo:: 566 CombineTo(SDNode *N, SDValue Res, bool AddTo) { 567 return ((DAGCombiner*)DC)->CombineTo(N, Res, AddTo); 568 } 569 570 571 SDValue TargetLowering::DAGCombinerInfo:: 572 CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo) { 573 return ((DAGCombiner*)DC)->CombineTo(N, Res0, Res1, AddTo); 574 } 575 576 void TargetLowering::DAGCombinerInfo:: 577 CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) { 578 return ((DAGCombiner*)DC)->CommitTargetLoweringOpt(TLO); 579 } 580 581 //===----------------------------------------------------------------------===// 582 // Helper Functions 583 //===----------------------------------------------------------------------===// 584 585 void DAGCombiner::deleteAndRecombine(SDNode *N) { 586 removeFromWorklist(N); 587 588 // If the operands of this node are only used by the node, they will now be 589 // dead. Make sure to re-visit them and recursively delete dead nodes. 590 for (const SDValue &Op : N->ops()) 591 // For an operand generating multiple values, one of the values may 592 // become dead allowing further simplification (e.g. split index 593 // arithmetic from an indexed load). 594 if (Op->hasOneUse() || Op->getNumValues() > 1) 595 AddToWorklist(Op.getNode()); 596 597 DAG.DeleteNode(N); 598 } 599 600 /// Return 1 if we can compute the negated form of the specified expression for 601 /// the same cost as the expression itself, or 2 if we can compute the negated 602 /// form more cheaply than the expression itself. 603 static char isNegatibleForFree(SDValue Op, bool LegalOperations, 604 const TargetLowering &TLI, 605 const TargetOptions *Options, 606 unsigned Depth = 0) { 607 // fneg is removable even if it has multiple uses. 608 if (Op.getOpcode() == ISD::FNEG) return 2; 609 610 // Don't allow anything with multiple uses. 611 if (!Op.hasOneUse()) return 0; 612 613 // Don't recurse exponentially. 614 if (Depth > 6) return 0; 615 616 switch (Op.getOpcode()) { 617 default: return false; 618 case ISD::ConstantFP: { 619 if (!LegalOperations) 620 return 1; 621 622 // Don't invert constant FP values after legalization unless the target says 623 // the negated constant is legal. 624 EVT VT = Op.getValueType(); 625 return TLI.isOperationLegal(ISD::ConstantFP, VT) || 626 TLI.isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT); 627 } 628 case ISD::FADD: 629 // FIXME: determine better conditions for this xform. 630 if (!Options->UnsafeFPMath) return 0; 631 632 // After operation legalization, it might not be legal to create new FSUBs. 
633 if (LegalOperations && 634 !TLI.isOperationLegalOrCustom(ISD::FSUB, Op.getValueType())) 635 return 0; 636 637 // fold (fneg (fadd A, B)) -> (fsub (fneg A), B) 638 if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, 639 Options, Depth + 1)) 640 return V; 641 // fold (fneg (fadd A, B)) -> (fsub (fneg B), A) 642 return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options, 643 Depth + 1); 644 case ISD::FSUB: 645 // We can't turn -(A-B) into B-A when we honor signed zeros. 646 if (!Options->NoSignedZerosFPMath && 647 !Op.getNode()->getFlags()->hasNoSignedZeros()) 648 return 0; 649 650 // fold (fneg (fsub A, B)) -> (fsub B, A) 651 return 1; 652 653 case ISD::FMUL: 654 case ISD::FDIV: 655 if (Options->HonorSignDependentRoundingFPMath()) return 0; 656 657 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) or (fmul X, (fneg Y)) 658 if (char V = isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, 659 Options, Depth + 1)) 660 return V; 661 662 return isNegatibleForFree(Op.getOperand(1), LegalOperations, TLI, Options, 663 Depth + 1); 664 665 case ISD::FP_EXTEND: 666 case ISD::FP_ROUND: 667 case ISD::FSIN: 668 return isNegatibleForFree(Op.getOperand(0), LegalOperations, TLI, Options, 669 Depth + 1); 670 } 671 } 672 673 /// If isNegatibleForFree returns true, return the newly negated expression. 674 static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG, 675 bool LegalOperations, unsigned Depth = 0) { 676 const TargetOptions &Options = DAG.getTarget().Options; 677 // fneg is removable even if it has multiple uses. 678 if (Op.getOpcode() == ISD::FNEG) return Op.getOperand(0); 679 680 // Don't allow anything with multiple uses. 681 assert(Op.hasOneUse() && "Unknown reuse!"); 682 683 assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree"); 684 685 const SDNodeFlags *Flags = Op.getNode()->getFlags(); 686 687 switch (Op.getOpcode()) { 688 default: llvm_unreachable("Unknown code"); 689 case ISD::ConstantFP: { 690 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF(); 691 V.changeSign(); 692 return DAG.getConstantFP(V, SDLoc(Op), Op.getValueType()); 693 } 694 case ISD::FADD: 695 // FIXME: determine better conditions for this xform. 
696 assert(Options.UnsafeFPMath); 697 698 // fold (fneg (fadd A, B)) -> (fsub (fneg A), B) 699 if (isNegatibleForFree(Op.getOperand(0), LegalOperations, 700 DAG.getTargetLoweringInfo(), &Options, Depth+1)) 701 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(), 702 GetNegatedExpression(Op.getOperand(0), DAG, 703 LegalOperations, Depth+1), 704 Op.getOperand(1), Flags); 705 // fold (fneg (fadd A, B)) -> (fsub (fneg B), A) 706 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(), 707 GetNegatedExpression(Op.getOperand(1), DAG, 708 LegalOperations, Depth+1), 709 Op.getOperand(0), Flags); 710 case ISD::FSUB: 711 // fold (fneg (fsub 0, B)) -> B 712 if (ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(Op.getOperand(0))) 713 if (N0CFP->isZero()) 714 return Op.getOperand(1); 715 716 // fold (fneg (fsub A, B)) -> (fsub B, A) 717 return DAG.getNode(ISD::FSUB, SDLoc(Op), Op.getValueType(), 718 Op.getOperand(1), Op.getOperand(0), Flags); 719 720 case ISD::FMUL: 721 case ISD::FDIV: 722 assert(!Options.HonorSignDependentRoundingFPMath()); 723 724 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) 725 if (isNegatibleForFree(Op.getOperand(0), LegalOperations, 726 DAG.getTargetLoweringInfo(), &Options, Depth+1)) 727 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), 728 GetNegatedExpression(Op.getOperand(0), DAG, 729 LegalOperations, Depth+1), 730 Op.getOperand(1), Flags); 731 732 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y)) 733 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), 734 Op.getOperand(0), 735 GetNegatedExpression(Op.getOperand(1), DAG, 736 LegalOperations, Depth+1), Flags); 737 738 case ISD::FP_EXTEND: 739 case ISD::FSIN: 740 return DAG.getNode(Op.getOpcode(), SDLoc(Op), Op.getValueType(), 741 GetNegatedExpression(Op.getOperand(0), DAG, 742 LegalOperations, Depth+1)); 743 case ISD::FP_ROUND: 744 return DAG.getNode(ISD::FP_ROUND, SDLoc(Op), Op.getValueType(), 745 GetNegatedExpression(Op.getOperand(0), DAG, 746 LegalOperations, Depth+1), 747 Op.getOperand(1)); 748 } 749 } 750 751 // APInts must be the same size for most operations, this helper 752 // function zero extends the shorter of the pair so that they match. 753 // We provide an Offset so that we can create bitwidths that won't overflow. 754 static void zeroExtendToMatch(APInt &LHS, APInt &RHS, unsigned Offset = 0) { 755 unsigned Bits = Offset + std::max(LHS.getBitWidth(), RHS.getBitWidth()); 756 LHS = LHS.zextOrSelf(Bits); 757 RHS = RHS.zextOrSelf(Bits); 758 } 759 760 // Return true if this node is a setcc, or is a select_cc 761 // that selects between the target values used for true and false, making it 762 // equivalent to a setcc. Also, set the incoming LHS, RHS, and CC references to 763 // the appropriate nodes based on the type of node we are checking. This 764 // simplifies life a bit for the callers. 
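// For example, assuming a target whose boolean contents use 1 for true and 0 for false: (select_cc LHS, RHS, 1, 0, setlt) selects exactly the boolean values and is treated as the equivalent (setcc LHS, RHS, setlt), with LHS, RHS and CC taken from operands 0, 1 and 4 of the SELECT_CC.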
765 bool DAGCombiner::isSetCCEquivalent(SDValue N, SDValue &LHS, SDValue &RHS, 766 SDValue &CC) const { 767 if (N.getOpcode() == ISD::SETCC) { 768 LHS = N.getOperand(0); 769 RHS = N.getOperand(1); 770 CC = N.getOperand(2); 771 return true; 772 } 773 774 if (N.getOpcode() != ISD::SELECT_CC || 775 !TLI.isConstTrueVal(N.getOperand(2).getNode()) || 776 !TLI.isConstFalseVal(N.getOperand(3).getNode())) 777 return false; 778 779 if (TLI.getBooleanContents(N.getValueType()) == 780 TargetLowering::UndefinedBooleanContent) 781 return false; 782 783 LHS = N.getOperand(0); 784 RHS = N.getOperand(1); 785 CC = N.getOperand(4); 786 return true; 787 } 788 789 /// Return true if this is a SetCC-equivalent operation with only one use. 790 /// If this is true, it allows the users to invert the operation for free when 791 /// it is profitable to do so. 792 bool DAGCombiner::isOneUseSetCC(SDValue N) const { 793 SDValue N0, N1, N2; 794 if (isSetCCEquivalent(N, N0, N1, N2) && N.getNode()->hasOneUse()) 795 return true; 796 return false; 797 } 798 799 // \brief Returns the SDNode if it is a constant float BuildVector 800 // or constant float. 801 static SDNode *isConstantFPBuildVectorOrConstantFP(SDValue N) { 802 if (isa<ConstantFPSDNode>(N)) 803 return N.getNode(); 804 if (ISD::isBuildVectorOfConstantFPSDNodes(N.getNode())) 805 return N.getNode(); 806 return nullptr; 807 } 808 809 // Determines if it is a constant integer or a build vector of constant 810 // integers (and undefs). 811 // Do not permit build vector implicit truncation. 812 static bool isConstantOrConstantVector(SDValue N, bool NoOpaques = false) { 813 if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N)) 814 return !(Const->isOpaque() && NoOpaques); 815 if (N.getOpcode() != ISD::BUILD_VECTOR) 816 return false; 817 unsigned BitWidth = N.getScalarValueSizeInBits(); 818 for (const SDValue &Op : N->op_values()) { 819 if (Op.isUndef()) 820 continue; 821 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(Op); 822 if (!Const || Const->getAPIntValue().getBitWidth() != BitWidth || 823 (Const->isOpaque() && NoOpaques)) 824 return false; 825 } 826 return true; 827 } 828 829 // Determines if it is a constant null integer or a splatted vector of a 830 // constant null integer (with no undefs). 831 // Build vector implicit truncation is not an issue for null values. 832 static bool isNullConstantOrNullSplatConstant(SDValue N) { 833 if (ConstantSDNode *Splat = isConstOrConstSplat(N)) 834 return Splat->isNullValue(); 835 return false; 836 } 837 838 // Determines if it is a constant integer of one or a splatted vector of a 839 // constant integer of one (with no undefs). 840 // Do not permit build vector implicit truncation. 841 static bool isOneConstantOrOneSplatConstant(SDValue N) { 842 unsigned BitWidth = N.getScalarValueSizeInBits(); 843 if (ConstantSDNode *Splat = isConstOrConstSplat(N)) 844 return Splat->isOne() && Splat->getAPIntValue().getBitWidth() == BitWidth; 845 return false; 846 } 847 848 // Determines if it is a constant integer of all ones or a splatted vector of a 849 // constant integer of all ones (with no undefs). 850 // Do not permit build vector implicit truncation. 
851 static bool isAllOnesConstantOrAllOnesSplatConstant(SDValue N) { 852 unsigned BitWidth = N.getScalarValueSizeInBits(); 853 if (ConstantSDNode *Splat = isConstOrConstSplat(N)) 854 return Splat->isAllOnesValue() && 855 Splat->getAPIntValue().getBitWidth() == BitWidth; 856 return false; 857 } 858 859 // Determines if a BUILD_VECTOR is composed of all-constants possibly mixed with 860 // undef's. 861 static bool isAnyConstantBuildVector(const SDNode *N) { 862 return ISD::isBuildVectorOfConstantSDNodes(N) || 863 ISD::isBuildVectorOfConstantFPSDNodes(N); 864 } 865 866 SDValue DAGCombiner::ReassociateOps(unsigned Opc, const SDLoc &DL, SDValue N0, 867 SDValue N1) { 868 EVT VT = N0.getValueType(); 869 if (N0.getOpcode() == Opc) { 870 if (SDNode *L = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) { 871 if (SDNode *R = DAG.isConstantIntBuildVectorOrConstantInt(N1)) { 872 // reassoc. (op (op x, c1), c2) -> (op x, (op c1, c2)) 873 if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, L, R)) 874 return DAG.getNode(Opc, DL, VT, N0.getOperand(0), OpNode); 875 return SDValue(); 876 } 877 if (N0.hasOneUse()) { 878 // reassoc. (op (op x, c1), y) -> (op (op x, y), c1) iff x+c1 has one 879 // use 880 SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0.getOperand(0), N1); 881 if (!OpNode.getNode()) 882 return SDValue(); 883 AddToWorklist(OpNode.getNode()); 884 return DAG.getNode(Opc, DL, VT, OpNode, N0.getOperand(1)); 885 } 886 } 887 } 888 889 if (N1.getOpcode() == Opc) { 890 if (SDNode *R = DAG.isConstantIntBuildVectorOrConstantInt(N1.getOperand(1))) { 891 if (SDNode *L = DAG.isConstantIntBuildVectorOrConstantInt(N0)) { 892 // reassoc. (op c2, (op x, c1)) -> (op x, (op c1, c2)) 893 if (SDValue OpNode = DAG.FoldConstantArithmetic(Opc, DL, VT, R, L)) 894 return DAG.getNode(Opc, DL, VT, N1.getOperand(0), OpNode); 895 return SDValue(); 896 } 897 if (N1.hasOneUse()) { 898 // reassoc. (op x, (op y, c1)) -> (op (op x, y), c1) iff x+c1 has one 899 // use 900 SDValue OpNode = DAG.getNode(Opc, SDLoc(N0), VT, N0, N1.getOperand(0)); 901 if (!OpNode.getNode()) 902 return SDValue(); 903 AddToWorklist(OpNode.getNode()); 904 return DAG.getNode(Opc, DL, VT, OpNode, N1.getOperand(1)); 905 } 906 } 907 } 908 909 return SDValue(); 910 } 911 912 SDValue DAGCombiner::CombineTo(SDNode *N, const SDValue *To, unsigned NumTo, 913 bool AddTo) { 914 assert(N->getNumValues() == NumTo && "Broken CombineTo call!"); 915 ++NodesCombined; 916 DEBUG(dbgs() << "\nReplacing.1 "; 917 N->dump(&DAG); 918 dbgs() << "\nWith: "; 919 To[0].getNode()->dump(&DAG); 920 dbgs() << " and " << NumTo-1 << " other values\n"); 921 for (unsigned i = 0, e = NumTo; i != e; ++i) 922 assert((!To[i].getNode() || 923 N->getValueType(i) == To[i].getValueType()) && 924 "Cannot combine value to value of different type!"); 925 926 WorklistRemover DeadNodes(*this); 927 DAG.ReplaceAllUsesWith(N, To); 928 if (AddTo) { 929 // Push the new nodes and any users onto the worklist 930 for (unsigned i = 0, e = NumTo; i != e; ++i) { 931 if (To[i].getNode()) { 932 AddToWorklist(To[i].getNode()); 933 AddUsersToWorklist(To[i].getNode()); 934 } 935 } 936 } 937 938 // Finally, if the node is now dead, remove it from the graph. The node 939 // may not be dead if the replacement process recursively simplified to 940 // something else needing this node. 941 if (N->use_empty()) 942 deleteAndRecombine(N); 943 return SDValue(N, 0); 944 } 945 946 void DAGCombiner:: 947 CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) { 948 // Replace all uses. 
If any nodes become isomorphic to other nodes and 949 // are deleted, make sure to remove them from our worklist. 950 WorklistRemover DeadNodes(*this); 951 DAG.ReplaceAllUsesOfValueWith(TLO.Old, TLO.New); 952 953 // Push the new node and any (possibly new) users onto the worklist. 954 AddToWorklist(TLO.New.getNode()); 955 AddUsersToWorklist(TLO.New.getNode()); 956 957 // Finally, if the node is now dead, remove it from the graph. The node 958 // may not be dead if the replacement process recursively simplified to 959 // something else needing this node. 960 if (TLO.Old.getNode()->use_empty()) 961 deleteAndRecombine(TLO.Old.getNode()); 962 } 963 964 /// Check the specified integer node value to see if it can be simplified or if 965 /// things it uses can be simplified by bit propagation. If so, return true. 966 bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) { 967 TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations); 968 APInt KnownZero, KnownOne; 969 if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) 970 return false; 971 972 // Revisit the node. 973 AddToWorklist(Op.getNode()); 974 975 // Replace the old value with the new one. 976 ++NodesCombined; 977 DEBUG(dbgs() << "\nReplacing.2 "; 978 TLO.Old.getNode()->dump(&DAG); 979 dbgs() << "\nWith: "; 980 TLO.New.getNode()->dump(&DAG); 981 dbgs() << '\n'); 982 983 CommitTargetLoweringOpt(TLO); 984 return true; 985 } 986 987 void DAGCombiner::ReplaceLoadWithPromotedLoad(SDNode *Load, SDNode *ExtLoad) { 988 SDLoc DL(Load); 989 EVT VT = Load->getValueType(0); 990 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, VT, SDValue(ExtLoad, 0)); 991 992 DEBUG(dbgs() << "\nReplacing.9 "; 993 Load->dump(&DAG); 994 dbgs() << "\nWith: "; 995 Trunc.getNode()->dump(&DAG); 996 dbgs() << '\n'); 997 WorklistRemover DeadNodes(*this); 998 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), Trunc); 999 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), SDValue(ExtLoad, 1)); 1000 deleteAndRecombine(Load); 1001 AddToWorklist(Trunc.getNode()); 1002 } 1003 1004 SDValue DAGCombiner::PromoteOperand(SDValue Op, EVT PVT, bool &Replace) { 1005 Replace = false; 1006 SDLoc DL(Op); 1007 if (ISD::isUNINDEXEDLoad(Op.getNode())) { 1008 LoadSDNode *LD = cast<LoadSDNode>(Op); 1009 EVT MemVT = LD->getMemoryVT(); 1010 ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) 1011 ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, PVT, MemVT) ? ISD::ZEXTLOAD 1012 : ISD::EXTLOAD) 1013 : LD->getExtensionType(); 1014 Replace = true; 1015 return DAG.getExtLoad(ExtType, DL, PVT, 1016 LD->getChain(), LD->getBasePtr(), 1017 MemVT, LD->getMemOperand()); 1018 } 1019 1020 unsigned Opc = Op.getOpcode(); 1021 switch (Opc) { 1022 default: break; 1023 case ISD::AssertSext: 1024 return DAG.getNode(ISD::AssertSext, DL, PVT, 1025 SExtPromoteOperand(Op.getOperand(0), PVT), 1026 Op.getOperand(1)); 1027 case ISD::AssertZext: 1028 return DAG.getNode(ISD::AssertZext, DL, PVT, 1029 ZExtPromoteOperand(Op.getOperand(0), PVT), 1030 Op.getOperand(1)); 1031 case ISD::Constant: { 1032 unsigned ExtOpc = 1033 Op.getValueType().isByteSized() ? 
ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 1034 return DAG.getNode(ExtOpc, DL, PVT, Op); 1035 } 1036 } 1037 1038 if (!TLI.isOperationLegal(ISD::ANY_EXTEND, PVT)) 1039 return SDValue(); 1040 return DAG.getNode(ISD::ANY_EXTEND, DL, PVT, Op); 1041 } 1042 1043 SDValue DAGCombiner::SExtPromoteOperand(SDValue Op, EVT PVT) { 1044 if (!TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, PVT)) 1045 return SDValue(); 1046 EVT OldVT = Op.getValueType(); 1047 SDLoc DL(Op); 1048 bool Replace = false; 1049 SDValue NewOp = PromoteOperand(Op, PVT, Replace); 1050 if (!NewOp.getNode()) 1051 return SDValue(); 1052 AddToWorklist(NewOp.getNode()); 1053 1054 if (Replace) 1055 ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode()); 1056 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, NewOp.getValueType(), NewOp, 1057 DAG.getValueType(OldVT)); 1058 } 1059 1060 SDValue DAGCombiner::ZExtPromoteOperand(SDValue Op, EVT PVT) { 1061 EVT OldVT = Op.getValueType(); 1062 SDLoc DL(Op); 1063 bool Replace = false; 1064 SDValue NewOp = PromoteOperand(Op, PVT, Replace); 1065 if (!NewOp.getNode()) 1066 return SDValue(); 1067 AddToWorklist(NewOp.getNode()); 1068 1069 if (Replace) 1070 ReplaceLoadWithPromotedLoad(Op.getNode(), NewOp.getNode()); 1071 return DAG.getZeroExtendInReg(NewOp, DL, OldVT); 1072 } 1073 1074 /// Promote the specified integer binary operation if the target indicates it is 1075 /// beneficial. e.g. On x86, it's usually better to promote i16 operations to 1076 /// i32 since i16 instructions are longer. 1077 SDValue DAGCombiner::PromoteIntBinOp(SDValue Op) { 1078 if (!LegalOperations) 1079 return SDValue(); 1080 1081 EVT VT = Op.getValueType(); 1082 if (VT.isVector() || !VT.isInteger()) 1083 return SDValue(); 1084 1085 // If operation type is 'undesirable', e.g. i16 on x86, consider 1086 // promoting it. 1087 unsigned Opc = Op.getOpcode(); 1088 if (TLI.isTypeDesirableForOp(Opc, VT)) 1089 return SDValue(); 1090 1091 EVT PVT = VT; 1092 // Consult target whether it is a good idea to promote this operation and 1093 // what's the right type to promote it to. 1094 if (TLI.IsDesirableToPromoteOp(Op, PVT)) { 1095 assert(PVT != VT && "Don't know what type to promote to!"); 1096 1097 DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG)); 1098 1099 bool Replace0 = false; 1100 SDValue N0 = Op.getOperand(0); 1101 SDValue NN0 = PromoteOperand(N0, PVT, Replace0); 1102 1103 bool Replace1 = false; 1104 SDValue N1 = Op.getOperand(1); 1105 SDValue NN1 = PromoteOperand(N1, PVT, Replace1); 1106 SDLoc DL(Op); 1107 1108 SDValue RV = 1109 DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, NN0, NN1)); 1110 1111 // New replace instances of N0 and N1 1112 if (Replace0 && N0 && N0.getOpcode() != ISD::DELETED_NODE && NN0 && 1113 NN0.getOpcode() != ISD::DELETED_NODE) { 1114 AddToWorklist(NN0.getNode()); 1115 ReplaceLoadWithPromotedLoad(N0.getNode(), NN0.getNode()); 1116 } 1117 1118 if (Replace1 && N1 && N1.getOpcode() != ISD::DELETED_NODE && NN1 && 1119 NN1.getOpcode() != ISD::DELETED_NODE) { 1120 AddToWorklist(NN1.getNode()); 1121 ReplaceLoadWithPromotedLoad(N1.getNode(), NN1.getNode()); 1122 } 1123 1124 // Deal with Op being deleted. 1125 if (Op && Op.getOpcode() != ISD::DELETED_NODE) 1126 return RV; 1127 } 1128 return SDValue(); 1129 } 1130 1131 /// Promote the specified integer shift operation if the target indicates it is 1132 /// beneficial. e.g. On x86, it's usually better to promote i16 operations to 1133 /// i32 since i16 instructions are longer. 
1134 SDValue DAGCombiner::PromoteIntShiftOp(SDValue Op) { 1135 if (!LegalOperations) 1136 return SDValue(); 1137 1138 EVT VT = Op.getValueType(); 1139 if (VT.isVector() || !VT.isInteger()) 1140 return SDValue(); 1141 1142 // If operation type is 'undesirable', e.g. i16 on x86, consider 1143 // promoting it. 1144 unsigned Opc = Op.getOpcode(); 1145 if (TLI.isTypeDesirableForOp(Opc, VT)) 1146 return SDValue(); 1147 1148 EVT PVT = VT; 1149 // Consult target whether it is a good idea to promote this operation and 1150 // what's the right type to promote it to. 1151 if (TLI.IsDesirableToPromoteOp(Op, PVT)) { 1152 assert(PVT != VT && "Don't know what type to promote to!"); 1153 1154 DEBUG(dbgs() << "\nPromoting "; Op.getNode()->dump(&DAG)); 1155 1156 bool Replace = false; 1157 SDValue N0 = Op.getOperand(0); 1158 SDValue N1 = Op.getOperand(1); 1159 if (Opc == ISD::SRA) 1160 N0 = SExtPromoteOperand(N0, PVT); 1161 else if (Opc == ISD::SRL) 1162 N0 = ZExtPromoteOperand(N0, PVT); 1163 else 1164 N0 = PromoteOperand(N0, PVT, Replace); 1165 1166 if (!N0.getNode()) 1167 return SDValue(); 1168 1169 SDLoc DL(Op); 1170 SDValue RV = 1171 DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getNode(Opc, DL, PVT, N0, N1)); 1172 1173 AddToWorklist(N0.getNode()); 1174 if (Replace) 1175 ReplaceLoadWithPromotedLoad(Op.getOperand(0).getNode(), N0.getNode()); 1176 1177 // Deal with Op being deleted. 1178 if (Op && Op.getOpcode() != ISD::DELETED_NODE) 1179 return RV; 1180 } 1181 return SDValue(); 1182 } 1183 1184 SDValue DAGCombiner::PromoteExtend(SDValue Op) { 1185 if (!LegalOperations) 1186 return SDValue(); 1187 1188 EVT VT = Op.getValueType(); 1189 if (VT.isVector() || !VT.isInteger()) 1190 return SDValue(); 1191 1192 // If operation type is 'undesirable', e.g. i16 on x86, consider 1193 // promoting it. 1194 unsigned Opc = Op.getOpcode(); 1195 if (TLI.isTypeDesirableForOp(Opc, VT)) 1196 return SDValue(); 1197 1198 EVT PVT = VT; 1199 // Consult target whether it is a good idea to promote this operation and 1200 // what's the right type to promote it to. 1201 if (TLI.IsDesirableToPromoteOp(Op, PVT)) { 1202 assert(PVT != VT && "Don't know what type to promote to!"); 1203 // fold (aext (aext x)) -> (aext x) 1204 // fold (aext (zext x)) -> (zext x) 1205 // fold (aext (sext x)) -> (sext x) 1206 DEBUG(dbgs() << "\nPromoting "; 1207 Op.getNode()->dump(&DAG)); 1208 return DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, Op.getOperand(0)); 1209 } 1210 return SDValue(); 1211 } 1212 1213 bool DAGCombiner::PromoteLoad(SDValue Op) { 1214 if (!LegalOperations) 1215 return false; 1216 1217 if (!ISD::isUNINDEXEDLoad(Op.getNode())) 1218 return false; 1219 1220 EVT VT = Op.getValueType(); 1221 if (VT.isVector() || !VT.isInteger()) 1222 return false; 1223 1224 // If operation type is 'undesirable', e.g. i16 on x86, consider 1225 // promoting it. 1226 unsigned Opc = Op.getOpcode(); 1227 if (TLI.isTypeDesirableForOp(Opc, VT)) 1228 return false; 1229 1230 EVT PVT = VT; 1231 // Consult target whether it is a good idea to promote this operation and 1232 // what's the right type to promote it to. 1233 if (TLI.IsDesirableToPromoteOp(Op, PVT)) { 1234 assert(PVT != VT && "Don't know what type to promote to!"); 1235 1236 SDLoc DL(Op); 1237 SDNode *N = Op.getNode(); 1238 LoadSDNode *LD = cast<LoadSDNode>(N); 1239 EVT MemVT = LD->getMemoryVT(); 1240 ISD::LoadExtType ExtType = ISD::isNON_EXTLoad(LD) 1241 ? (TLI.isLoadExtLegal(ISD::ZEXTLOAD, PVT, MemVT) ? 
ISD::ZEXTLOAD 1242 : ISD::EXTLOAD) 1243 : LD->getExtensionType(); 1244 SDValue NewLD = DAG.getExtLoad(ExtType, DL, PVT, 1245 LD->getChain(), LD->getBasePtr(), 1246 MemVT, LD->getMemOperand()); 1247 SDValue Result = DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD); 1248 1249 DEBUG(dbgs() << "\nPromoting "; 1250 N->dump(&DAG); 1251 dbgs() << "\nTo: "; 1252 Result.getNode()->dump(&DAG); 1253 dbgs() << '\n'); 1254 WorklistRemover DeadNodes(*this); 1255 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result); 1256 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLD.getValue(1)); 1257 deleteAndRecombine(N); 1258 AddToWorklist(Result.getNode()); 1259 return true; 1260 } 1261 return false; 1262 } 1263 1264 /// \brief Recursively delete a node which has no uses and any operands for 1265 /// which it is the only use. 1266 /// 1267 /// Note that this both deletes the nodes and removes them from the worklist. 1268 /// It also adds any nodes that have had a user deleted to the worklist as they 1269 /// may now have only one use and be subject to other combines. 1270 bool DAGCombiner::recursivelyDeleteUnusedNodes(SDNode *N) { 1271 if (!N->use_empty()) 1272 return false; 1273 1274 SmallSetVector<SDNode *, 16> Nodes; 1275 Nodes.insert(N); 1276 do { 1277 N = Nodes.pop_back_val(); 1278 if (!N) 1279 continue; 1280 1281 if (N->use_empty()) { 1282 for (const SDValue &ChildN : N->op_values()) 1283 Nodes.insert(ChildN.getNode()); 1284 1285 removeFromWorklist(N); 1286 DAG.DeleteNode(N); 1287 } else { 1288 AddToWorklist(N); 1289 } 1290 } while (!Nodes.empty()); 1291 return true; 1292 } 1293 1294 //===----------------------------------------------------------------------===// 1295 // Main DAG Combiner implementation 1296 //===----------------------------------------------------------------------===// 1297 1298 void DAGCombiner::Run(CombineLevel AtLevel) { 1299 // Set the instance variables, so that the various visit routines may use them. 1300 Level = AtLevel; 1301 LegalOperations = Level >= AfterLegalizeVectorOps; 1302 LegalTypes = Level >= AfterLegalizeTypes; 1303 1304 // Add all the dag nodes to the worklist. 1305 for (SDNode &Node : DAG.allnodes()) 1306 AddToWorklist(&Node); 1307 1308 // Create a dummy node (which is not added to allnodes), that adds a reference 1309 // to the root node, preventing it from being deleted, and tracking any 1310 // changes of the root. 1311 HandleSDNode Dummy(DAG.getRoot()); 1312 1313 // While the worklist isn't empty, find a node and try to combine it. 1314 while (!WorklistMap.empty()) { 1315 SDNode *N; 1316 // The Worklist holds the SDNodes in order, but it may contain null entries. 1317 do { 1318 N = Worklist.pop_back_val(); 1319 } while (!N); 1320 1321 bool GoodWorklistEntry = WorklistMap.erase(N); 1322 (void)GoodWorklistEntry; 1323 assert(GoodWorklistEntry && 1324 "Found a worklist entry without a corresponding map entry!"); 1325 1326 // If N has no uses, it is dead. Make sure to revisit all N's operands once 1327 // N is deleted from the DAG, since they too may now be dead or may have a 1328 // reduced number of uses, allowing other xforms. 1329 if (recursivelyDeleteUnusedNodes(N)) 1330 continue; 1331 1332 WorklistRemover DeadNodes(*this); 1333 1334 // If this combine is running after legalizing the DAG, re-legalize any 1335 // nodes pulled off the worklist. 
1336 if (Level == AfterLegalizeDAG) { 1337 SmallSetVector<SDNode *, 16> UpdatedNodes; 1338 bool NIsValid = DAG.LegalizeOp(N, UpdatedNodes); 1339 1340 for (SDNode *LN : UpdatedNodes) { 1341 AddToWorklist(LN); 1342 AddUsersToWorklist(LN); 1343 } 1344 if (!NIsValid) 1345 continue; 1346 } 1347 1348 DEBUG(dbgs() << "\nCombining: "; N->dump(&DAG)); 1349 1350 // Add any operands of the new node which have not yet been combined to the 1351 // worklist as well. Because the worklist uniques things already, this 1352 // won't repeatedly process the same operand. 1353 CombinedNodes.insert(N); 1354 for (const SDValue &ChildN : N->op_values()) 1355 if (!CombinedNodes.count(ChildN.getNode())) 1356 AddToWorklist(ChildN.getNode()); 1357 1358 SDValue RV = combine(N); 1359 1360 if (!RV.getNode()) 1361 continue; 1362 1363 ++NodesCombined; 1364 1365 // If we get back the same node we passed in, rather than a new node or 1366 // zero, we know that the node must have defined multiple values and 1367 // CombineTo was used. Since CombineTo takes care of the worklist 1368 // mechanics for us, we have no work to do in this case. 1369 if (RV.getNode() == N) 1370 continue; 1371 1372 assert(N->getOpcode() != ISD::DELETED_NODE && 1373 RV.getOpcode() != ISD::DELETED_NODE && 1374 "Node was deleted but visit returned new node!"); 1375 1376 DEBUG(dbgs() << " ... into: "; 1377 RV.getNode()->dump(&DAG)); 1378 1379 if (N->getNumValues() == RV.getNode()->getNumValues()) 1380 DAG.ReplaceAllUsesWith(N, RV.getNode()); 1381 else { 1382 assert(N->getValueType(0) == RV.getValueType() && 1383 N->getNumValues() == 1 && "Type mismatch"); 1384 DAG.ReplaceAllUsesWith(N, &RV); 1385 } 1386 1387 // Push the new node and any users onto the worklist 1388 AddToWorklist(RV.getNode()); 1389 AddUsersToWorklist(RV.getNode()); 1390 1391 // Finally, if the node is now dead, remove it from the graph. The node 1392 // may not be dead if the replacement process recursively simplified to 1393 // something else needing this node. This will also take care of adding any 1394 // operands which have lost a user to the worklist. 1395 recursivelyDeleteUnusedNodes(N); 1396 } 1397 1398 // If the root changed (e.g. it was a dead load), update the root. 
1399 DAG.setRoot(Dummy.getValue()); 1400 DAG.RemoveDeadNodes(); 1401 } 1402 1403 SDValue DAGCombiner::visit(SDNode *N) { 1404 switch (N->getOpcode()) { 1405 default: break; 1406 case ISD::TokenFactor: return visitTokenFactor(N); 1407 case ISD::MERGE_VALUES: return visitMERGE_VALUES(N); 1408 case ISD::ADD: return visitADD(N); 1409 case ISD::SUB: return visitSUB(N); 1410 case ISD::ADDC: return visitADDC(N); 1411 case ISD::UADDO: return visitUADDO(N); 1412 case ISD::SUBC: return visitSUBC(N); 1413 case ISD::USUBO: return visitUSUBO(N); 1414 case ISD::ADDE: return visitADDE(N); 1415 case ISD::SUBE: return visitSUBE(N); 1416 case ISD::MUL: return visitMUL(N); 1417 case ISD::SDIV: return visitSDIV(N); 1418 case ISD::UDIV: return visitUDIV(N); 1419 case ISD::SREM: 1420 case ISD::UREM: return visitREM(N); 1421 case ISD::MULHU: return visitMULHU(N); 1422 case ISD::MULHS: return visitMULHS(N); 1423 case ISD::SMUL_LOHI: return visitSMUL_LOHI(N); 1424 case ISD::UMUL_LOHI: return visitUMUL_LOHI(N); 1425 case ISD::SMULO: return visitSMULO(N); 1426 case ISD::UMULO: return visitUMULO(N); 1427 case ISD::SMIN: 1428 case ISD::SMAX: 1429 case ISD::UMIN: 1430 case ISD::UMAX: return visitIMINMAX(N); 1431 case ISD::AND: return visitAND(N); 1432 case ISD::OR: return visitOR(N); 1433 case ISD::XOR: return visitXOR(N); 1434 case ISD::SHL: return visitSHL(N); 1435 case ISD::SRA: return visitSRA(N); 1436 case ISD::SRL: return visitSRL(N); 1437 case ISD::ROTR: 1438 case ISD::ROTL: return visitRotate(N); 1439 case ISD::ABS: return visitABS(N); 1440 case ISD::BSWAP: return visitBSWAP(N); 1441 case ISD::BITREVERSE: return visitBITREVERSE(N); 1442 case ISD::CTLZ: return visitCTLZ(N); 1443 case ISD::CTLZ_ZERO_UNDEF: return visitCTLZ_ZERO_UNDEF(N); 1444 case ISD::CTTZ: return visitCTTZ(N); 1445 case ISD::CTTZ_ZERO_UNDEF: return visitCTTZ_ZERO_UNDEF(N); 1446 case ISD::CTPOP: return visitCTPOP(N); 1447 case ISD::SELECT: return visitSELECT(N); 1448 case ISD::VSELECT: return visitVSELECT(N); 1449 case ISD::SELECT_CC: return visitSELECT_CC(N); 1450 case ISD::SETCC: return visitSETCC(N); 1451 case ISD::SETCCE: return visitSETCCE(N); 1452 case ISD::SIGN_EXTEND: return visitSIGN_EXTEND(N); 1453 case ISD::ZERO_EXTEND: return visitZERO_EXTEND(N); 1454 case ISD::ANY_EXTEND: return visitANY_EXTEND(N); 1455 case ISD::AssertZext: return visitAssertZext(N); 1456 case ISD::SIGN_EXTEND_INREG: return visitSIGN_EXTEND_INREG(N); 1457 case ISD::SIGN_EXTEND_VECTOR_INREG: return visitSIGN_EXTEND_VECTOR_INREG(N); 1458 case ISD::ZERO_EXTEND_VECTOR_INREG: return visitZERO_EXTEND_VECTOR_INREG(N); 1459 case ISD::TRUNCATE: return visitTRUNCATE(N); 1460 case ISD::BITCAST: return visitBITCAST(N); 1461 case ISD::BUILD_PAIR: return visitBUILD_PAIR(N); 1462 case ISD::FADD: return visitFADD(N); 1463 case ISD::FSUB: return visitFSUB(N); 1464 case ISD::FMUL: return visitFMUL(N); 1465 case ISD::FMA: return visitFMA(N); 1466 case ISD::FDIV: return visitFDIV(N); 1467 case ISD::FREM: return visitFREM(N); 1468 case ISD::FSQRT: return visitFSQRT(N); 1469 case ISD::FCOPYSIGN: return visitFCOPYSIGN(N); 1470 case ISD::SINT_TO_FP: return visitSINT_TO_FP(N); 1471 case ISD::UINT_TO_FP: return visitUINT_TO_FP(N); 1472 case ISD::FP_TO_SINT: return visitFP_TO_SINT(N); 1473 case ISD::FP_TO_UINT: return visitFP_TO_UINT(N); 1474 case ISD::FP_ROUND: return visitFP_ROUND(N); 1475 case ISD::FP_ROUND_INREG: return visitFP_ROUND_INREG(N); 1476 case ISD::FP_EXTEND: return visitFP_EXTEND(N); 1477 case ISD::FNEG: return visitFNEG(N); 1478 case ISD::FABS: return visitFABS(N); 1479 case 
ISD::FFLOOR: return visitFFLOOR(N); 1480 case ISD::FMINNUM: return visitFMINNUM(N); 1481 case ISD::FMAXNUM: return visitFMAXNUM(N); 1482 case ISD::FCEIL: return visitFCEIL(N); 1483 case ISD::FTRUNC: return visitFTRUNC(N); 1484 case ISD::BRCOND: return visitBRCOND(N); 1485 case ISD::BR_CC: return visitBR_CC(N); 1486 case ISD::LOAD: return visitLOAD(N); 1487 case ISD::STORE: return visitSTORE(N); 1488 case ISD::INSERT_VECTOR_ELT: return visitINSERT_VECTOR_ELT(N); 1489 case ISD::EXTRACT_VECTOR_ELT: return visitEXTRACT_VECTOR_ELT(N); 1490 case ISD::BUILD_VECTOR: return visitBUILD_VECTOR(N); 1491 case ISD::CONCAT_VECTORS: return visitCONCAT_VECTORS(N); 1492 case ISD::EXTRACT_SUBVECTOR: return visitEXTRACT_SUBVECTOR(N); 1493 case ISD::VECTOR_SHUFFLE: return visitVECTOR_SHUFFLE(N); 1494 case ISD::SCALAR_TO_VECTOR: return visitSCALAR_TO_VECTOR(N); 1495 case ISD::INSERT_SUBVECTOR: return visitINSERT_SUBVECTOR(N); 1496 case ISD::MGATHER: return visitMGATHER(N); 1497 case ISD::MLOAD: return visitMLOAD(N); 1498 case ISD::MSCATTER: return visitMSCATTER(N); 1499 case ISD::MSTORE: return visitMSTORE(N); 1500 case ISD::FP_TO_FP16: return visitFP_TO_FP16(N); 1501 case ISD::FP16_TO_FP: return visitFP16_TO_FP(N); 1502 } 1503 return SDValue(); 1504 } 1505 1506 SDValue DAGCombiner::combine(SDNode *N) { 1507 SDValue RV = visit(N); 1508 1509 // If nothing happened, try a target-specific DAG combine. 1510 if (!RV.getNode()) { 1511 assert(N->getOpcode() != ISD::DELETED_NODE && 1512 "Node was deleted but visit returned NULL!"); 1513 1514 if (N->getOpcode() >= ISD::BUILTIN_OP_END || 1515 TLI.hasTargetDAGCombine((ISD::NodeType)N->getOpcode())) { 1516 1517 // Expose the DAG combiner to the target combiner impls. 1518 TargetLowering::DAGCombinerInfo 1519 DagCombineInfo(DAG, Level, false, this); 1520 1521 RV = TLI.PerformDAGCombine(N, DagCombineInfo); 1522 } 1523 } 1524 1525 // If nothing happened still, try promoting the operation. 1526 if (!RV.getNode()) { 1527 switch (N->getOpcode()) { 1528 default: break; 1529 case ISD::ADD: 1530 case ISD::SUB: 1531 case ISD::MUL: 1532 case ISD::AND: 1533 case ISD::OR: 1534 case ISD::XOR: 1535 RV = PromoteIntBinOp(SDValue(N, 0)); 1536 break; 1537 case ISD::SHL: 1538 case ISD::SRA: 1539 case ISD::SRL: 1540 RV = PromoteIntShiftOp(SDValue(N, 0)); 1541 break; 1542 case ISD::SIGN_EXTEND: 1543 case ISD::ZERO_EXTEND: 1544 case ISD::ANY_EXTEND: 1545 RV = PromoteExtend(SDValue(N, 0)); 1546 break; 1547 case ISD::LOAD: 1548 if (PromoteLoad(SDValue(N, 0))) 1549 RV = SDValue(N, 0); 1550 break; 1551 } 1552 } 1553 1554 // If N is a commutative binary node, try commuting it to enable more 1555 // sdisel CSE. 1556 if (!RV.getNode() && SelectionDAG::isCommutativeBinOp(N->getOpcode()) && 1557 N->getNumValues() == 1) { 1558 SDValue N0 = N->getOperand(0); 1559 SDValue N1 = N->getOperand(1); 1560 1561 // Constant operands are canonicalized to RHS. 1562 if (isa<ConstantSDNode>(N0) || !isa<ConstantSDNode>(N1)) { 1563 SDValue Ops[] = {N1, N0}; 1564 SDNode *CSENode = DAG.getNodeIfExists(N->getOpcode(), N->getVTList(), Ops, 1565 N->getFlags()); 1566 if (CSENode) 1567 return SDValue(CSENode, 0); 1568 } 1569 } 1570 1571 return RV; 1572 } 1573 1574 /// Given a node, return its input chain if it has one, otherwise return a null 1575 /// sd operand. 
static SDValue getInputChainForNode(SDNode *N) {
  if (unsigned NumOps = N->getNumOperands()) {
    if (N->getOperand(0).getValueType() == MVT::Other)
      return N->getOperand(0);
    if (N->getOperand(NumOps-1).getValueType() == MVT::Other)
      return N->getOperand(NumOps-1);
    for (unsigned i = 1; i < NumOps-1; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other)
        return N->getOperand(i);
  }
  return SDValue();
}

SDValue DAGCombiner::visitTokenFactor(SDNode *N) {
  // If N has two operands, where one has an input chain equal to the other,
  // the 'other' chain is redundant.
  if (N->getNumOperands() == 2) {
    if (getInputChainForNode(N->getOperand(0).getNode()) == N->getOperand(1))
      return N->getOperand(0);
    if (getInputChainForNode(N->getOperand(1).getNode()) == N->getOperand(0))
      return N->getOperand(1);
  }

  SmallVector<SDNode *, 8> TFs;   // List of token factors to visit.
  SmallVector<SDValue, 8> Ops;    // Ops for replacing token factor.
  SmallPtrSet<SDNode*, 16> SeenOps;
  bool Changed = false;           // If we should replace this token factor.

  // Start out with this token factor.
  TFs.push_back(N);

  // Iterate through token factors. The TFs list grows when new token factors
  // are encountered.
  for (unsigned i = 0; i < TFs.size(); ++i) {
    SDNode *TF = TFs[i];

    // Check each of the operands.
    for (const SDValue &Op : TF->op_values()) {

      switch (Op.getOpcode()) {
      case ISD::EntryToken:
        // Entry tokens don't need to be added to the list. They are
        // redundant.
        Changed = true;
        break;

      case ISD::TokenFactor:
        if (Op.hasOneUse() && !is_contained(TFs, Op.getNode())) {
          // Queue up for processing.
          TFs.push_back(Op.getNode());
          // Clean up in case the token factor is removed.
          AddToWorklist(Op.getNode());
          Changed = true;
          break;
        }
        LLVM_FALLTHROUGH;

      default:
        // Only add if it isn't already in the list.
        if (SeenOps.insert(Op.getNode()).second)
          Ops.push_back(Op);
        else
          Changed = true;
        break;
      }
    }
  }

  // Remove nodes that are chained to another node in the list. Do so by
  // walking up chains breadth-first, stopping when we've seen another
  // operand. In general we must climb to the EntryNode, but we can exit
  // early if we find all remaining work is associated with just one operand,
  // as no further pruning is possible.

  // List of nodes to search through and original Ops from which they originate.
  SmallVector<std::pair<SDNode *, unsigned>, 8> Worklist;
  SmallVector<unsigned, 8> OpWorkCount; // Count of work for each Op.
  SmallPtrSet<SDNode *, 16> SeenChains;
  bool DidPruneOps = false;

  unsigned NumLeftToConsider = 0;
  for (const SDValue &Op : Ops) {
    Worklist.push_back(std::make_pair(Op.getNode(), NumLeftToConsider++));
    OpWorkCount.push_back(1);
  }

  auto AddToWorklist = [&](unsigned CurIdx, SDNode *Op, unsigned OpNumber) {
    // If this is an Op, we can remove the op from the list. Re-mark any
    // search associated with it as coming from the current OpNumber.
    if (SeenOps.count(Op) != 0) {
      Changed = true;
      DidPruneOps = true;
      unsigned OrigOpNumber = 0;
      while (OrigOpNumber < Ops.size() && Ops[OrigOpNumber].getNode() != Op)
        OrigOpNumber++;
      assert((OrigOpNumber != Ops.size()) &&
             "expected to find TokenFactor Operand");
      // Re-mark worklist from OrigOpNumber to OpNumber
      for (unsigned i = CurIdx + 1; i < Worklist.size(); ++i) {
        if (Worklist[i].second == OrigOpNumber) {
          Worklist[i].second = OpNumber;
        }
      }
      OpWorkCount[OpNumber] += OpWorkCount[OrigOpNumber];
      OpWorkCount[OrigOpNumber] = 0;
      NumLeftToConsider--;
    }
    // Add if it's a new chain.
    if (SeenChains.insert(Op).second) {
      OpWorkCount[OpNumber]++;
      Worklist.push_back(std::make_pair(Op, OpNumber));
    }
  };

  for (unsigned i = 0; i < Worklist.size() && i < 1024; ++i) {
    // We need to consider at least 2 Ops to prune.
    if (NumLeftToConsider <= 1)
      break;
    auto CurNode = Worklist[i].first;
    auto CurOpNumber = Worklist[i].second;
    assert((OpWorkCount[CurOpNumber] > 0) &&
           "Node should not appear in worklist");
    switch (CurNode->getOpcode()) {
    case ISD::EntryToken:
      // Hitting EntryToken is the only way for the search to terminate without
      // hitting another operand's search. Prevent this operand from being
      // marked as considered.
      NumLeftToConsider++;
      break;
    case ISD::TokenFactor:
      for (const SDValue &Op : CurNode->op_values())
        AddToWorklist(i, Op.getNode(), CurOpNumber);
      break;
    case ISD::CopyFromReg:
    case ISD::CopyToReg:
      AddToWorklist(i, CurNode->getOperand(0).getNode(), CurOpNumber);
      break;
    default:
      if (auto *MemNode = dyn_cast<MemSDNode>(CurNode))
        AddToWorklist(i, MemNode->getChain().getNode(), CurOpNumber);
      break;
    }
    OpWorkCount[CurOpNumber]--;
    if (OpWorkCount[CurOpNumber] == 0)
      NumLeftToConsider--;
  }

  SDValue Result;

  // If we've changed things around, then replace the token factor.
  if (Changed) {
    if (Ops.empty()) {
      // The entry token is the only possible outcome.
      Result = DAG.getEntryNode();
    } else {
      if (DidPruneOps) {
        SmallVector<SDValue, 8> PrunedOps;
        for (const SDValue &Op : Ops) {
          if (SeenChains.count(Op.getNode()) == 0)
            PrunedOps.push_back(Op);
        }
        Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, PrunedOps);
      } else {
        Result = DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Ops);
      }
    }

    // Add users to worklist, since we may introduce a lot of new
    // chained token factors while removing memory deps.
    return CombineTo(N, Result, true /*add to worklist*/);
  }

  return Result;
}

/// MERGE_VALUES can always be eliminated.
SDValue DAGCombiner::visitMERGE_VALUES(SDNode *N) {
  WorklistRemover DeadNodes(*this);
  // Replacing results may cause a different MERGE_VALUES to suddenly
  // be CSE'd with N, and carry its uses with it. Iterate until no
  // uses remain, to ensure that the node can be safely deleted.
  // First add the users of this node to the work list so that they
  // can be tried again once they have new operands.
1761 AddUsersToWorklist(N); 1762 do { 1763 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) 1764 DAG.ReplaceAllUsesOfValueWith(SDValue(N, i), N->getOperand(i)); 1765 } while (!N->use_empty()); 1766 deleteAndRecombine(N); 1767 return SDValue(N, 0); // Return N so it doesn't get rechecked! 1768 } 1769 1770 /// If \p N is a ConstantSDNode with isOpaque() == false return it casted to a 1771 /// ConstantSDNode pointer else nullptr. 1772 static ConstantSDNode *getAsNonOpaqueConstant(SDValue N) { 1773 ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N); 1774 return Const != nullptr && !Const->isOpaque() ? Const : nullptr; 1775 } 1776 1777 SDValue DAGCombiner::foldBinOpIntoSelect(SDNode *BO) { 1778 auto BinOpcode = BO->getOpcode(); 1779 assert((BinOpcode == ISD::ADD || BinOpcode == ISD::SUB || 1780 BinOpcode == ISD::MUL || BinOpcode == ISD::SDIV || 1781 BinOpcode == ISD::UDIV || BinOpcode == ISD::SREM || 1782 BinOpcode == ISD::UREM || BinOpcode == ISD::AND || 1783 BinOpcode == ISD::OR || BinOpcode == ISD::XOR || 1784 BinOpcode == ISD::SHL || BinOpcode == ISD::SRL || 1785 BinOpcode == ISD::SRA || BinOpcode == ISD::FADD || 1786 BinOpcode == ISD::FSUB || BinOpcode == ISD::FMUL || 1787 BinOpcode == ISD::FDIV || BinOpcode == ISD::FREM) && 1788 "Unexpected binary operator"); 1789 1790 // Bail out if any constants are opaque because we can't constant fold those. 1791 SDValue C1 = BO->getOperand(1); 1792 if (!isConstantOrConstantVector(C1, true) && 1793 !isConstantFPBuildVectorOrConstantFP(C1)) 1794 return SDValue(); 1795 1796 // Don't do this unless the old select is going away. We want to eliminate the 1797 // binary operator, not replace a binop with a select. 1798 // TODO: Handle ISD::SELECT_CC. 1799 SDValue Sel = BO->getOperand(0); 1800 if (Sel.getOpcode() != ISD::SELECT || !Sel.hasOneUse()) 1801 return SDValue(); 1802 1803 SDValue CT = Sel.getOperand(1); 1804 if (!isConstantOrConstantVector(CT, true) && 1805 !isConstantFPBuildVectorOrConstantFP(CT)) 1806 return SDValue(); 1807 1808 SDValue CF = Sel.getOperand(2); 1809 if (!isConstantOrConstantVector(CF, true) && 1810 !isConstantFPBuildVectorOrConstantFP(CF)) 1811 return SDValue(); 1812 1813 // We have a select-of-constants followed by a binary operator with a 1814 // constant. Eliminate the binop by pulling the constant math into the select. 
1815 // Example: add (select Cond, CT, CF), C1 --> select Cond, CT + C1, CF + C1 1816 EVT VT = Sel.getValueType(); 1817 SDLoc DL(Sel); 1818 SDValue NewCT = DAG.getNode(BinOpcode, DL, VT, CT, C1); 1819 assert((NewCT.isUndef() || isConstantOrConstantVector(NewCT) || 1820 isConstantFPBuildVectorOrConstantFP(NewCT)) && 1821 "Failed to constant fold a binop with constant operands"); 1822 1823 SDValue NewCF = DAG.getNode(BinOpcode, DL, VT, CF, C1); 1824 assert((NewCF.isUndef() || isConstantOrConstantVector(NewCF) || 1825 isConstantFPBuildVectorOrConstantFP(NewCF)) && 1826 "Failed to constant fold a binop with constant operands"); 1827 1828 return DAG.getSelect(DL, VT, Sel.getOperand(0), NewCT, NewCF); 1829 } 1830 1831 SDValue DAGCombiner::visitADD(SDNode *N) { 1832 SDValue N0 = N->getOperand(0); 1833 SDValue N1 = N->getOperand(1); 1834 EVT VT = N0.getValueType(); 1835 SDLoc DL(N); 1836 1837 // fold vector ops 1838 if (VT.isVector()) { 1839 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 1840 return FoldedVOp; 1841 1842 // fold (add x, 0) -> x, vector edition 1843 if (ISD::isBuildVectorAllZeros(N1.getNode())) 1844 return N0; 1845 if (ISD::isBuildVectorAllZeros(N0.getNode())) 1846 return N1; 1847 } 1848 1849 // fold (add x, undef) -> undef 1850 if (N0.isUndef()) 1851 return N0; 1852 1853 if (N1.isUndef()) 1854 return N1; 1855 1856 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) { 1857 // canonicalize constant to RHS 1858 if (!DAG.isConstantIntBuildVectorOrConstantInt(N1)) 1859 return DAG.getNode(ISD::ADD, DL, VT, N1, N0); 1860 // fold (add c1, c2) -> c1+c2 1861 return DAG.FoldConstantArithmetic(ISD::ADD, DL, VT, N0.getNode(), 1862 N1.getNode()); 1863 } 1864 1865 // fold (add x, 0) -> x 1866 if (isNullConstant(N1)) 1867 return N0; 1868 1869 // fold ((c1-A)+c2) -> (c1+c2)-A 1870 if (isConstantOrConstantVector(N1, /* NoOpaque */ true)) { 1871 if (N0.getOpcode() == ISD::SUB) 1872 if (isConstantOrConstantVector(N0.getOperand(0), /* NoOpaque */ true)) { 1873 return DAG.getNode(ISD::SUB, DL, VT, 1874 DAG.getNode(ISD::ADD, DL, VT, N1, N0.getOperand(0)), 1875 N0.getOperand(1)); 1876 } 1877 } 1878 1879 if (SDValue NewSel = foldBinOpIntoSelect(N)) 1880 return NewSel; 1881 1882 // reassociate add 1883 if (SDValue RADD = ReassociateOps(ISD::ADD, DL, N0, N1)) 1884 return RADD; 1885 1886 // fold ((0-A) + B) -> B-A 1887 if (N0.getOpcode() == ISD::SUB && 1888 isNullConstantOrNullSplatConstant(N0.getOperand(0))) 1889 return DAG.getNode(ISD::SUB, DL, VT, N1, N0.getOperand(1)); 1890 1891 // fold (A + (0-B)) -> A-B 1892 if (N1.getOpcode() == ISD::SUB && 1893 isNullConstantOrNullSplatConstant(N1.getOperand(0))) 1894 return DAG.getNode(ISD::SUB, DL, VT, N0, N1.getOperand(1)); 1895 1896 // fold (A+(B-A)) -> B 1897 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(1)) 1898 return N1.getOperand(0); 1899 1900 // fold ((B-A)+A) -> B 1901 if (N0.getOpcode() == ISD::SUB && N1 == N0.getOperand(1)) 1902 return N0.getOperand(0); 1903 1904 // fold (A+(B-(A+C))) to (B-C) 1905 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD && 1906 N0 == N1.getOperand(1).getOperand(0)) 1907 return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0), 1908 N1.getOperand(1).getOperand(1)); 1909 1910 // fold (A+(B-(C+A))) to (B-C) 1911 if (N1.getOpcode() == ISD::SUB && N1.getOperand(1).getOpcode() == ISD::ADD && 1912 N0 == N1.getOperand(1).getOperand(1)) 1913 return DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(0), 1914 N1.getOperand(1).getOperand(0)); 1915 1916 // fold (A+((B-A)+or-C)) to (B+or-C) 1917 if ((N1.getOpcode() == 
ISD::SUB || N1.getOpcode() == ISD::ADD) && 1918 N1.getOperand(0).getOpcode() == ISD::SUB && 1919 N0 == N1.getOperand(0).getOperand(1)) 1920 return DAG.getNode(N1.getOpcode(), DL, VT, N1.getOperand(0).getOperand(0), 1921 N1.getOperand(1)); 1922 1923 // fold (A-B)+(C-D) to (A+C)-(B+D) when A or C is constant 1924 if (N0.getOpcode() == ISD::SUB && N1.getOpcode() == ISD::SUB) { 1925 SDValue N00 = N0.getOperand(0); 1926 SDValue N01 = N0.getOperand(1); 1927 SDValue N10 = N1.getOperand(0); 1928 SDValue N11 = N1.getOperand(1); 1929 1930 if (isConstantOrConstantVector(N00) || isConstantOrConstantVector(N10)) 1931 return DAG.getNode(ISD::SUB, DL, VT, 1932 DAG.getNode(ISD::ADD, SDLoc(N0), VT, N00, N10), 1933 DAG.getNode(ISD::ADD, SDLoc(N1), VT, N01, N11)); 1934 } 1935 1936 if (SimplifyDemandedBits(SDValue(N, 0))) 1937 return SDValue(N, 0); 1938 1939 // fold (a+b) -> (a|b) iff a and b share no bits. 1940 if ((!LegalOperations || TLI.isOperationLegal(ISD::OR, VT)) && 1941 VT.isInteger() && DAG.haveNoCommonBitsSet(N0, N1)) 1942 return DAG.getNode(ISD::OR, DL, VT, N0, N1); 1943 1944 if (SDValue Combined = visitADDLike(N0, N1, N)) 1945 return Combined; 1946 1947 if (SDValue Combined = visitADDLike(N1, N0, N)) 1948 return Combined; 1949 1950 return SDValue(); 1951 } 1952 1953 SDValue DAGCombiner::visitADDLike(SDValue N0, SDValue N1, SDNode *LocReference) { 1954 EVT VT = N0.getValueType(); 1955 SDLoc DL(LocReference); 1956 1957 // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n)) 1958 if (N1.getOpcode() == ISD::SHL && N1.getOperand(0).getOpcode() == ISD::SUB && 1959 isNullConstantOrNullSplatConstant(N1.getOperand(0).getOperand(0))) 1960 return DAG.getNode(ISD::SUB, DL, VT, N0, 1961 DAG.getNode(ISD::SHL, DL, VT, 1962 N1.getOperand(0).getOperand(1), 1963 N1.getOperand(1))); 1964 1965 if (N1.getOpcode() == ISD::AND) { 1966 SDValue AndOp0 = N1.getOperand(0); 1967 unsigned NumSignBits = DAG.ComputeNumSignBits(AndOp0); 1968 unsigned DestBits = VT.getScalarSizeInBits(); 1969 1970 // (add z, (and (sbbl x, x), 1)) -> (sub z, (sbbl x, x)) 1971 // and similar xforms where the inner op is either ~0 or 0. 1972 if (NumSignBits == DestBits && 1973 isOneConstantOrOneSplatConstant(N1->getOperand(1))) 1974 return DAG.getNode(ISD::SUB, DL, VT, N0, AndOp0); 1975 } 1976 1977 // add (sext i1), X -> sub X, (zext i1) 1978 if (N0.getOpcode() == ISD::SIGN_EXTEND && 1979 N0.getOperand(0).getValueType() == MVT::i1 && 1980 !TLI.isOperationLegal(ISD::SIGN_EXTEND, MVT::i1)) { 1981 SDValue ZExt = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)); 1982 return DAG.getNode(ISD::SUB, DL, VT, N1, ZExt); 1983 } 1984 1985 // add X, (sextinreg Y i1) -> sub X, (and Y 1) 1986 if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) { 1987 VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1)); 1988 if (TN->getVT() == MVT::i1) { 1989 SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0), 1990 DAG.getConstant(1, DL, VT)); 1991 return DAG.getNode(ISD::SUB, DL, VT, N0, ZExt); 1992 } 1993 } 1994 1995 return SDValue(); 1996 } 1997 1998 SDValue DAGCombiner::visitADDC(SDNode *N) { 1999 SDValue N0 = N->getOperand(0); 2000 SDValue N1 = N->getOperand(1); 2001 EVT VT = N0.getValueType(); 2002 SDLoc DL(N); 2003 2004 // If the flag result is dead, turn this into an ADD. 2005 if (!N->hasAnyUseOfValue(1)) 2006 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1), 2007 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2008 2009 // canonicalize constant to RHS. 
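  // (e.g. (addc 42, x) is rewritten below as (addc x, 42), so the folds that
  // follow only have to look for a constant in operand 1.)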
2010 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 2011 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 2012 if (N0C && !N1C) 2013 return DAG.getNode(ISD::ADDC, DL, N->getVTList(), N1, N0); 2014 2015 // fold (addc x, 0) -> x + no carry out 2016 if (isNullConstant(N1)) 2017 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, 2018 DL, MVT::Glue)); 2019 2020 // If it cannot overflow, transform into an add. 2021 if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never) 2022 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1), 2023 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2024 2025 return SDValue(); 2026 } 2027 2028 SDValue DAGCombiner::visitUADDO(SDNode *N) { 2029 SDValue N0 = N->getOperand(0); 2030 SDValue N1 = N->getOperand(1); 2031 EVT VT = N0.getValueType(); 2032 if (VT.isVector()) 2033 return SDValue(); 2034 2035 EVT CarryVT = N->getValueType(1); 2036 SDLoc DL(N); 2037 2038 // If the flag result is dead, turn this into an ADD. 2039 if (!N->hasAnyUseOfValue(1)) 2040 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1), 2041 DAG.getUNDEF(CarryVT)); 2042 2043 // canonicalize constant to RHS. 2044 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 2045 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 2046 if (N0C && !N1C) 2047 return DAG.getNode(ISD::UADDO, DL, N->getVTList(), N1, N0); 2048 2049 // fold (uaddo x, 0) -> x + no carry out 2050 if (isNullConstant(N1)) 2051 return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT)); 2052 2053 // If it cannot overflow, transform into an add. 2054 if (DAG.computeOverflowKind(N0, N1) == SelectionDAG::OFK_Never) 2055 return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1), 2056 DAG.getConstant(0, DL, CarryVT)); 2057 2058 return SDValue(); 2059 } 2060 2061 SDValue DAGCombiner::visitADDE(SDNode *N) { 2062 SDValue N0 = N->getOperand(0); 2063 SDValue N1 = N->getOperand(1); 2064 SDValue CarryIn = N->getOperand(2); 2065 2066 // canonicalize constant to RHS 2067 ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0); 2068 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 2069 if (N0C && !N1C) 2070 return DAG.getNode(ISD::ADDE, SDLoc(N), N->getVTList(), 2071 N1, N0, CarryIn); 2072 2073 // fold (adde x, y, false) -> (addc x, y) 2074 if (CarryIn.getOpcode() == ISD::CARRY_FALSE) 2075 return DAG.getNode(ISD::ADDC, SDLoc(N), N->getVTList(), N0, N1); 2076 2077 return SDValue(); 2078 } 2079 2080 // Since it may not be valid to emit a fold to zero for vector initializers 2081 // check if we can before folding. 2082 static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT, 2083 SelectionDAG &DAG, bool LegalOperations, 2084 bool LegalTypes) { 2085 if (!VT.isVector()) 2086 return DAG.getConstant(0, DL, VT); 2087 if (!LegalOperations || TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) 2088 return DAG.getConstant(0, DL, VT); 2089 return SDValue(); 2090 } 2091 2092 SDValue DAGCombiner::visitSUB(SDNode *N) { 2093 SDValue N0 = N->getOperand(0); 2094 SDValue N1 = N->getOperand(1); 2095 EVT VT = N0.getValueType(); 2096 SDLoc DL(N); 2097 2098 // fold vector ops 2099 if (VT.isVector()) { 2100 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 2101 return FoldedVOp; 2102 2103 // fold (sub x, 0) -> x, vector edition 2104 if (ISD::isBuildVectorAllZeros(N1.getNode())) 2105 return N0; 2106 } 2107 2108 // fold (sub x, x) -> 0 2109 // FIXME: Refactor this and xor and other similar operations together. 
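  // (tryFoldToZero is used below rather than emitting the zero directly so
  // that we don't create an illegal all-zeros BUILD_VECTOR once operations
  // have been legalized; see its definition above.)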
2110 if (N0 == N1) 2111 return tryFoldToZero(DL, TLI, VT, DAG, LegalOperations, LegalTypes); 2112 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 2113 DAG.isConstantIntBuildVectorOrConstantInt(N1)) { 2114 // fold (sub c1, c2) -> c1-c2 2115 return DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, N0.getNode(), 2116 N1.getNode()); 2117 } 2118 2119 if (SDValue NewSel = foldBinOpIntoSelect(N)) 2120 return NewSel; 2121 2122 ConstantSDNode *N1C = getAsNonOpaqueConstant(N1); 2123 2124 // fold (sub x, c) -> (add x, -c) 2125 if (N1C) { 2126 return DAG.getNode(ISD::ADD, DL, VT, N0, 2127 DAG.getConstant(-N1C->getAPIntValue(), DL, VT)); 2128 } 2129 2130 if (isNullConstantOrNullSplatConstant(N0)) { 2131 unsigned BitWidth = VT.getScalarSizeInBits(); 2132 // Right-shifting everything out but the sign bit followed by negation is 2133 // the same as flipping arithmetic/logical shift type without the negation: 2134 // -(X >>u 31) -> (X >>s 31) 2135 // -(X >>s 31) -> (X >>u 31) 2136 if (N1->getOpcode() == ISD::SRA || N1->getOpcode() == ISD::SRL) { 2137 ConstantSDNode *ShiftAmt = isConstOrConstSplat(N1.getOperand(1)); 2138 if (ShiftAmt && ShiftAmt->getZExtValue() == BitWidth - 1) { 2139 auto NewSh = N1->getOpcode() == ISD::SRA ? ISD::SRL : ISD::SRA; 2140 if (!LegalOperations || TLI.isOperationLegal(NewSh, VT)) 2141 return DAG.getNode(NewSh, DL, VT, N1.getOperand(0), N1.getOperand(1)); 2142 } 2143 } 2144 2145 // 0 - X --> 0 if the sub is NUW. 2146 if (N->getFlags()->hasNoUnsignedWrap()) 2147 return N0; 2148 2149 if (DAG.MaskedValueIsZero(N1, ~APInt::getSignBit(BitWidth))) { 2150 // N1 is either 0 or the minimum signed value. If the sub is NSW, then 2151 // N1 must be 0 because negating the minimum signed value is undefined. 2152 if (N->getFlags()->hasNoSignedWrap()) 2153 return N0; 2154 2155 // 0 - X --> X if X is 0 or the minimum signed value. 2156 return N1; 2157 } 2158 } 2159 2160 // Canonicalize (sub -1, x) -> ~x, i.e. 
(xor x, -1) 2161 if (isAllOnesConstantOrAllOnesSplatConstant(N0)) 2162 return DAG.getNode(ISD::XOR, DL, VT, N1, N0); 2163 2164 // fold A-(A-B) -> B 2165 if (N1.getOpcode() == ISD::SUB && N0 == N1.getOperand(0)) 2166 return N1.getOperand(1); 2167 2168 // fold (A+B)-A -> B 2169 if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1) 2170 return N0.getOperand(1); 2171 2172 // fold (A+B)-B -> A 2173 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1) 2174 return N0.getOperand(0); 2175 2176 // fold C2-(A+C1) -> (C2-C1)-A 2177 if (N1.getOpcode() == ISD::ADD) { 2178 SDValue N11 = N1.getOperand(1); 2179 if (isConstantOrConstantVector(N0, /* NoOpaques */ true) && 2180 isConstantOrConstantVector(N11, /* NoOpaques */ true)) { 2181 SDValue NewC = DAG.getNode(ISD::SUB, DL, VT, N0, N11); 2182 return DAG.getNode(ISD::SUB, DL, VT, NewC, N1.getOperand(0)); 2183 } 2184 } 2185 2186 // fold ((A+(B+or-C))-B) -> A+or-C 2187 if (N0.getOpcode() == ISD::ADD && 2188 (N0.getOperand(1).getOpcode() == ISD::SUB || 2189 N0.getOperand(1).getOpcode() == ISD::ADD) && 2190 N0.getOperand(1).getOperand(0) == N1) 2191 return DAG.getNode(N0.getOperand(1).getOpcode(), DL, VT, N0.getOperand(0), 2192 N0.getOperand(1).getOperand(1)); 2193 2194 // fold ((A+(C+B))-B) -> A+C 2195 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1).getOpcode() == ISD::ADD && 2196 N0.getOperand(1).getOperand(1) == N1) 2197 return DAG.getNode(ISD::ADD, DL, VT, N0.getOperand(0), 2198 N0.getOperand(1).getOperand(0)); 2199 2200 // fold ((A-(B-C))-C) -> A-B 2201 if (N0.getOpcode() == ISD::SUB && N0.getOperand(1).getOpcode() == ISD::SUB && 2202 N0.getOperand(1).getOperand(1) == N1) 2203 return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), 2204 N0.getOperand(1).getOperand(0)); 2205 2206 // If either operand of a sub is undef, the result is undef 2207 if (N0.isUndef()) 2208 return N0; 2209 if (N1.isUndef()) 2210 return N1; 2211 2212 // If the relocation model supports it, consider symbol offsets. 2213 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(N0)) 2214 if (!LegalOperations && TLI.isOffsetFoldingLegal(GA)) { 2215 // fold (sub Sym, c) -> Sym-c 2216 if (N1C && GA->getOpcode() == ISD::GlobalAddress) 2217 return DAG.getGlobalAddress(GA->getGlobal(), SDLoc(N1C), VT, 2218 GA->getOffset() - 2219 (uint64_t)N1C->getSExtValue()); 2220 // fold (sub Sym+c1, Sym+c2) -> c1-c2 2221 if (GlobalAddressSDNode *GB = dyn_cast<GlobalAddressSDNode>(N1)) 2222 if (GA->getGlobal() == GB->getGlobal()) 2223 return DAG.getConstant((uint64_t)GA->getOffset() - GB->getOffset(), 2224 DL, VT); 2225 } 2226 2227 // sub X, (sextinreg Y i1) -> add X, (and Y 1) 2228 if (N1.getOpcode() == ISD::SIGN_EXTEND_INREG) { 2229 VTSDNode *TN = cast<VTSDNode>(N1.getOperand(1)); 2230 if (TN->getVT() == MVT::i1) { 2231 SDValue ZExt = DAG.getNode(ISD::AND, DL, VT, N1.getOperand(0), 2232 DAG.getConstant(1, DL, VT)); 2233 return DAG.getNode(ISD::ADD, DL, VT, N0, ZExt); 2234 } 2235 } 2236 2237 return SDValue(); 2238 } 2239 2240 SDValue DAGCombiner::visitSUBC(SDNode *N) { 2241 SDValue N0 = N->getOperand(0); 2242 SDValue N1 = N->getOperand(1); 2243 EVT VT = N0.getValueType(); 2244 SDLoc DL(N); 2245 2246 // If the flag result is dead, turn this into an SUB. 
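  // (i.e. when the carry/borrow result has no uses, (subc x, y) is replaced
  // below by a plain (sub x, y) paired with a CARRY_FALSE value.)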
2247 if (!N->hasAnyUseOfValue(1)) 2248 return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1), 2249 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2250 2251 // fold (subc x, x) -> 0 + no borrow 2252 if (N0 == N1) 2253 return CombineTo(N, DAG.getConstant(0, DL, VT), 2254 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2255 2256 // fold (subc x, 0) -> x + no borrow 2257 if (isNullConstant(N1)) 2258 return CombineTo(N, N0, DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2259 2260 // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1) + no borrow 2261 if (isAllOnesConstant(N0)) 2262 return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0), 2263 DAG.getNode(ISD::CARRY_FALSE, DL, MVT::Glue)); 2264 2265 return SDValue(); 2266 } 2267 2268 SDValue DAGCombiner::visitUSUBO(SDNode *N) { 2269 SDValue N0 = N->getOperand(0); 2270 SDValue N1 = N->getOperand(1); 2271 EVT VT = N0.getValueType(); 2272 if (VT.isVector()) 2273 return SDValue(); 2274 2275 EVT CarryVT = N->getValueType(1); 2276 SDLoc DL(N); 2277 2278 // If the flag result is dead, turn this into an SUB. 2279 if (!N->hasAnyUseOfValue(1)) 2280 return CombineTo(N, DAG.getNode(ISD::SUB, DL, VT, N0, N1), 2281 DAG.getUNDEF(CarryVT)); 2282 2283 // fold (usubo x, x) -> 0 + no borrow 2284 if (N0 == N1) 2285 return CombineTo(N, DAG.getConstant(0, DL, VT), 2286 DAG.getConstant(0, DL, CarryVT)); 2287 2288 // fold (usubo x, 0) -> x + no borrow 2289 if (isNullConstant(N1)) 2290 return CombineTo(N, N0, DAG.getConstant(0, DL, CarryVT)); 2291 2292 // Canonicalize (usubo -1, x) -> ~x, i.e. (xor x, -1) + no borrow 2293 if (isAllOnesConstant(N0)) 2294 return CombineTo(N, DAG.getNode(ISD::XOR, DL, VT, N1, N0), 2295 DAG.getConstant(0, DL, CarryVT)); 2296 2297 return SDValue(); 2298 } 2299 2300 SDValue DAGCombiner::visitSUBE(SDNode *N) { 2301 SDValue N0 = N->getOperand(0); 2302 SDValue N1 = N->getOperand(1); 2303 SDValue CarryIn = N->getOperand(2); 2304 2305 // fold (sube x, y, false) -> (subc x, y) 2306 if (CarryIn.getOpcode() == ISD::CARRY_FALSE) 2307 return DAG.getNode(ISD::SUBC, SDLoc(N), N->getVTList(), N0, N1); 2308 2309 return SDValue(); 2310 } 2311 2312 SDValue DAGCombiner::visitMUL(SDNode *N) { 2313 SDValue N0 = N->getOperand(0); 2314 SDValue N1 = N->getOperand(1); 2315 EVT VT = N0.getValueType(); 2316 2317 // fold (mul x, undef) -> 0 2318 if (N0.isUndef() || N1.isUndef()) 2319 return DAG.getConstant(0, SDLoc(N), VT); 2320 2321 bool N0IsConst = false; 2322 bool N1IsConst = false; 2323 bool N1IsOpaqueConst = false; 2324 bool N0IsOpaqueConst = false; 2325 APInt ConstValue0, ConstValue1; 2326 // fold vector ops 2327 if (VT.isVector()) { 2328 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 2329 return FoldedVOp; 2330 2331 N0IsConst = ISD::isConstantSplatVector(N0.getNode(), ConstValue0); 2332 N1IsConst = ISD::isConstantSplatVector(N1.getNode(), ConstValue1); 2333 } else { 2334 N0IsConst = isa<ConstantSDNode>(N0); 2335 if (N0IsConst) { 2336 ConstValue0 = cast<ConstantSDNode>(N0)->getAPIntValue(); 2337 N0IsOpaqueConst = cast<ConstantSDNode>(N0)->isOpaque(); 2338 } 2339 N1IsConst = isa<ConstantSDNode>(N1); 2340 if (N1IsConst) { 2341 ConstValue1 = cast<ConstantSDNode>(N1)->getAPIntValue(); 2342 N1IsOpaqueConst = cast<ConstantSDNode>(N1)->isOpaque(); 2343 } 2344 } 2345 2346 // fold (mul c1, c2) -> c1*c2 2347 if (N0IsConst && N1IsConst && !N0IsOpaqueConst && !N1IsOpaqueConst) 2348 return DAG.FoldConstantArithmetic(ISD::MUL, SDLoc(N), VT, 2349 N0.getNode(), N1.getNode()); 2350 2351 // canonicalize constant to RHS (vector doesn't have to splat) 2352 if 
(DAG.isConstantIntBuildVectorOrConstantInt(N0) && 2353 !DAG.isConstantIntBuildVectorOrConstantInt(N1)) 2354 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N1, N0); 2355 // fold (mul x, 0) -> 0 2356 if (N1IsConst && ConstValue1 == 0) 2357 return N1; 2358 // We require a splat of the entire scalar bit width for non-contiguous 2359 // bit patterns. 2360 bool IsFullSplat = 2361 ConstValue1.getBitWidth() == VT.getScalarSizeInBits(); 2362 // fold (mul x, 1) -> x 2363 if (N1IsConst && ConstValue1 == 1 && IsFullSplat) 2364 return N0; 2365 2366 if (SDValue NewSel = foldBinOpIntoSelect(N)) 2367 return NewSel; 2368 2369 // fold (mul x, -1) -> 0-x 2370 if (N1IsConst && ConstValue1.isAllOnesValue()) { 2371 SDLoc DL(N); 2372 return DAG.getNode(ISD::SUB, DL, VT, 2373 DAG.getConstant(0, DL, VT), N0); 2374 } 2375 // fold (mul x, (1 << c)) -> x << c 2376 if (N1IsConst && !N1IsOpaqueConst && ConstValue1.isPowerOf2() && 2377 IsFullSplat) { 2378 SDLoc DL(N); 2379 return DAG.getNode(ISD::SHL, DL, VT, N0, 2380 DAG.getConstant(ConstValue1.logBase2(), DL, 2381 getShiftAmountTy(N0.getValueType()))); 2382 } 2383 // fold (mul x, -(1 << c)) -> -(x << c) or (-x) << c 2384 if (N1IsConst && !N1IsOpaqueConst && (-ConstValue1).isPowerOf2() && 2385 IsFullSplat) { 2386 unsigned Log2Val = (-ConstValue1).logBase2(); 2387 SDLoc DL(N); 2388 // FIXME: If the input is something that is easily negated (e.g. a 2389 // single-use add), we should put the negate there. 2390 return DAG.getNode(ISD::SUB, DL, VT, 2391 DAG.getConstant(0, DL, VT), 2392 DAG.getNode(ISD::SHL, DL, VT, N0, 2393 DAG.getConstant(Log2Val, DL, 2394 getShiftAmountTy(N0.getValueType())))); 2395 } 2396 2397 // (mul (shl X, c1), c2) -> (mul X, c2 << c1) 2398 if (N0.getOpcode() == ISD::SHL && 2399 isConstantOrConstantVector(N1, /* NoOpaques */ true) && 2400 isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) { 2401 SDValue C3 = DAG.getNode(ISD::SHL, SDLoc(N), VT, N1, N0.getOperand(1)); 2402 if (isConstantOrConstantVector(C3)) 2403 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), C3); 2404 } 2405 2406 // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one 2407 // use. 2408 { 2409 SDValue Sh(nullptr, 0), Y(nullptr, 0); 2410 2411 // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)). 2412 if (N0.getOpcode() == ISD::SHL && 2413 isConstantOrConstantVector(N0.getOperand(1)) && 2414 N0.getNode()->hasOneUse()) { 2415 Sh = N0; Y = N1; 2416 } else if (N1.getOpcode() == ISD::SHL && 2417 isConstantOrConstantVector(N1.getOperand(1)) && 2418 N1.getNode()->hasOneUse()) { 2419 Sh = N1; Y = N0; 2420 } 2421 2422 if (Sh.getNode()) { 2423 SDValue Mul = DAG.getNode(ISD::MUL, SDLoc(N), VT, Sh.getOperand(0), Y); 2424 return DAG.getNode(ISD::SHL, SDLoc(N), VT, Mul, Sh.getOperand(1)); 2425 } 2426 } 2427 2428 // fold (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2) 2429 if (DAG.isConstantIntBuildVectorOrConstantInt(N1) && 2430 N0.getOpcode() == ISD::ADD && 2431 DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1)) && 2432 isMulAddWithConstProfitable(N, N0, N1)) 2433 return DAG.getNode(ISD::ADD, SDLoc(N), VT, 2434 DAG.getNode(ISD::MUL, SDLoc(N0), VT, 2435 N0.getOperand(0), N1), 2436 DAG.getNode(ISD::MUL, SDLoc(N1), VT, 2437 N0.getOperand(1), N1)); 2438 2439 // reassociate mul 2440 if (SDValue RMUL = ReassociateOps(ISD::MUL, SDLoc(N), N0, N1)) 2441 return RMUL; 2442 2443 return SDValue(); 2444 } 2445 2446 /// Return true if divmod libcall is available. 
2447 static bool isDivRemLibcallAvailable(SDNode *Node, bool isSigned, 2448 const TargetLowering &TLI) { 2449 RTLIB::Libcall LC; 2450 EVT NodeType = Node->getValueType(0); 2451 if (!NodeType.isSimple()) 2452 return false; 2453 switch (NodeType.getSimpleVT().SimpleTy) { 2454 default: return false; // No libcall for vector types. 2455 case MVT::i8: LC= isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break; 2456 case MVT::i16: LC= isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break; 2457 case MVT::i32: LC= isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break; 2458 case MVT::i64: LC= isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break; 2459 case MVT::i128: LC= isSigned ? RTLIB::SDIVREM_I128:RTLIB::UDIVREM_I128; break; 2460 } 2461 2462 return TLI.getLibcallName(LC) != nullptr; 2463 } 2464 2465 /// Issue divrem if both quotient and remainder are needed. 2466 SDValue DAGCombiner::useDivRem(SDNode *Node) { 2467 if (Node->use_empty()) 2468 return SDValue(); // This is a dead node, leave it alone. 2469 2470 unsigned Opcode = Node->getOpcode(); 2471 bool isSigned = (Opcode == ISD::SDIV) || (Opcode == ISD::SREM); 2472 unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM; 2473 2474 // DivMod lib calls can still work on non-legal types if using lib-calls. 2475 EVT VT = Node->getValueType(0); 2476 if (VT.isVector() || !VT.isInteger()) 2477 return SDValue(); 2478 2479 if (!TLI.isTypeLegal(VT) && !TLI.isOperationCustom(DivRemOpc, VT)) 2480 return SDValue(); 2481 2482 // If DIVREM is going to get expanded into a libcall, 2483 // but there is no libcall available, then don't combine. 2484 if (!TLI.isOperationLegalOrCustom(DivRemOpc, VT) && 2485 !isDivRemLibcallAvailable(Node, isSigned, TLI)) 2486 return SDValue(); 2487 2488 // If div is legal, it's better to do the normal expansion 2489 unsigned OtherOpcode = 0; 2490 if ((Opcode == ISD::SDIV) || (Opcode == ISD::UDIV)) { 2491 OtherOpcode = isSigned ? ISD::SREM : ISD::UREM; 2492 if (TLI.isOperationLegalOrCustom(Opcode, VT)) 2493 return SDValue(); 2494 } else { 2495 OtherOpcode = isSigned ? ISD::SDIV : ISD::UDIV; 2496 if (TLI.isOperationLegalOrCustom(OtherOpcode, VT)) 2497 return SDValue(); 2498 } 2499 2500 SDValue Op0 = Node->getOperand(0); 2501 SDValue Op1 = Node->getOperand(1); 2502 SDValue combined; 2503 for (SDNode::use_iterator UI = Op0.getNode()->use_begin(), 2504 UE = Op0.getNode()->use_end(); UI != UE;) { 2505 SDNode *User = *UI++; 2506 if (User == Node || User->use_empty()) 2507 continue; 2508 // Convert the other matching node(s), too; 2509 // otherwise, the DIVREM may get target-legalized into something 2510 // target-specific that we won't be able to recognize. 
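    // (Illustrative example: if this node is (sdiv X, Y) and another user of
    // X is (srem X, Y), both get rewired below to the two results of a single
    // (sdivrem X, Y) node.)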
2511 unsigned UserOpc = User->getOpcode(); 2512 if ((UserOpc == Opcode || UserOpc == OtherOpcode || UserOpc == DivRemOpc) && 2513 User->getOperand(0) == Op0 && 2514 User->getOperand(1) == Op1) { 2515 if (!combined) { 2516 if (UserOpc == OtherOpcode) { 2517 SDVTList VTs = DAG.getVTList(VT, VT); 2518 combined = DAG.getNode(DivRemOpc, SDLoc(Node), VTs, Op0, Op1); 2519 } else if (UserOpc == DivRemOpc) { 2520 combined = SDValue(User, 0); 2521 } else { 2522 assert(UserOpc == Opcode); 2523 continue; 2524 } 2525 } 2526 if (UserOpc == ISD::SDIV || UserOpc == ISD::UDIV) 2527 CombineTo(User, combined); 2528 else if (UserOpc == ISD::SREM || UserOpc == ISD::UREM) 2529 CombineTo(User, combined.getValue(1)); 2530 } 2531 } 2532 return combined; 2533 } 2534 2535 static SDValue simplifyDivRem(SDNode *N, SelectionDAG &DAG) { 2536 SDValue N0 = N->getOperand(0); 2537 SDValue N1 = N->getOperand(1); 2538 EVT VT = N->getValueType(0); 2539 SDLoc DL(N); 2540 2541 if (DAG.isUndef(N->getOpcode(), {N0, N1})) 2542 return DAG.getUNDEF(VT); 2543 2544 // undef / X -> 0 2545 // undef % X -> 0 2546 if (N0.isUndef()) 2547 return DAG.getConstant(0, DL, VT); 2548 2549 return SDValue(); 2550 } 2551 2552 SDValue DAGCombiner::visitSDIV(SDNode *N) { 2553 SDValue N0 = N->getOperand(0); 2554 SDValue N1 = N->getOperand(1); 2555 EVT VT = N->getValueType(0); 2556 2557 // fold vector ops 2558 if (VT.isVector()) 2559 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 2560 return FoldedVOp; 2561 2562 SDLoc DL(N); 2563 2564 // fold (sdiv c1, c2) -> c1/c2 2565 ConstantSDNode *N0C = isConstOrConstSplat(N0); 2566 ConstantSDNode *N1C = isConstOrConstSplat(N1); 2567 if (N0C && N1C && !N0C->isOpaque() && !N1C->isOpaque()) 2568 return DAG.FoldConstantArithmetic(ISD::SDIV, DL, VT, N0C, N1C); 2569 // fold (sdiv X, 1) -> X 2570 if (N1C && N1C->isOne()) 2571 return N0; 2572 // fold (sdiv X, -1) -> 0-X 2573 if (N1C && N1C->isAllOnesValue()) 2574 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), N0); 2575 2576 if (SDValue V = simplifyDivRem(N, DAG)) 2577 return V; 2578 2579 if (SDValue NewSel = foldBinOpIntoSelect(N)) 2580 return NewSel; 2581 2582 // If we know the sign bits of both operands are zero, strength reduce to a 2583 // udiv instead. Handles (X&15) /s 4 -> X&15 >> 2 2584 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) 2585 return DAG.getNode(ISD::UDIV, DL, N1.getValueType(), N0, N1); 2586 2587 // fold (sdiv X, pow2) -> simple ops after legalize 2588 // FIXME: We check for the exact bit here because the generic lowering gives 2589 // better results in that case. The target-specific lowering should learn how 2590 // to handle exact sdivs efficiently. 2591 if (N1C && !N1C->isNullValue() && !N1C->isOpaque() && 2592 !cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact() && 2593 (N1C->getAPIntValue().isPowerOf2() || 2594 (-N1C->getAPIntValue()).isPowerOf2())) { 2595 // Target-specific implementation of sdiv x, pow2. 2596 if (SDValue Res = BuildSDIVPow2(N)) 2597 return Res; 2598 2599 unsigned lg2 = N1C->getAPIntValue().countTrailingZeros(); 2600 2601 // Splat the sign bit into the register 2602 SDValue SGN = 2603 DAG.getNode(ISD::SRA, DL, VT, N0, 2604 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, 2605 getShiftAmountTy(N0.getValueType()))); 2606 AddToWorklist(SGN.getNode()); 2607 2608 // Add (N0 < 0) ? 
abs2 - 1 : 0; 2609 SDValue SRL = 2610 DAG.getNode(ISD::SRL, DL, VT, SGN, 2611 DAG.getConstant(VT.getScalarSizeInBits() - lg2, DL, 2612 getShiftAmountTy(SGN.getValueType()))); 2613 SDValue ADD = DAG.getNode(ISD::ADD, DL, VT, N0, SRL); 2614 AddToWorklist(SRL.getNode()); 2615 AddToWorklist(ADD.getNode()); // Divide by pow2 2616 SDValue SRA = DAG.getNode(ISD::SRA, DL, VT, ADD, 2617 DAG.getConstant(lg2, DL, 2618 getShiftAmountTy(ADD.getValueType()))); 2619 2620 // If we're dividing by a positive value, we're done. Otherwise, we must 2621 // negate the result. 2622 if (N1C->getAPIntValue().isNonNegative()) 2623 return SRA; 2624 2625 AddToWorklist(SRA.getNode()); 2626 return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA); 2627 } 2628 2629 // If integer divide is expensive and we satisfy the requirements, emit an 2630 // alternate sequence. Targets may check function attributes for size/speed 2631 // trade-offs. 2632 AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes(); 2633 if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr)) 2634 if (SDValue Op = BuildSDIV(N)) 2635 return Op; 2636 2637 // sdiv, srem -> sdivrem 2638 // If the divisor is constant, then return DIVREM only if isIntDivCheap() is 2639 // true. Otherwise, we break the simplification logic in visitREM(). 2640 if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr)) 2641 if (SDValue DivRem = useDivRem(N)) 2642 return DivRem; 2643 2644 return SDValue(); 2645 } 2646 2647 SDValue DAGCombiner::visitUDIV(SDNode *N) { 2648 SDValue N0 = N->getOperand(0); 2649 SDValue N1 = N->getOperand(1); 2650 EVT VT = N->getValueType(0); 2651 2652 // fold vector ops 2653 if (VT.isVector()) 2654 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 2655 return FoldedVOp; 2656 2657 SDLoc DL(N); 2658 2659 // fold (udiv c1, c2) -> c1/c2 2660 ConstantSDNode *N0C = isConstOrConstSplat(N0); 2661 ConstantSDNode *N1C = isConstOrConstSplat(N1); 2662 if (N0C && N1C) 2663 if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::UDIV, DL, VT, 2664 N0C, N1C)) 2665 return Folded; 2666 2667 if (SDValue V = simplifyDivRem(N, DAG)) 2668 return V; 2669 2670 if (SDValue NewSel = foldBinOpIntoSelect(N)) 2671 return NewSel; 2672 2673 // fold (udiv x, (1 << c)) -> x >>u c 2674 if (isConstantOrConstantVector(N1, /*NoOpaques*/ true) && 2675 DAG.isKnownToBeAPowerOfTwo(N1)) { 2676 SDValue LogBase2 = BuildLogBase2(N1, DL); 2677 AddToWorklist(LogBase2.getNode()); 2678 2679 EVT ShiftVT = getShiftAmountTy(N0.getValueType()); 2680 SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ShiftVT); 2681 AddToWorklist(Trunc.getNode()); 2682 return DAG.getNode(ISD::SRL, DL, VT, N0, Trunc); 2683 } 2684 2685 // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2 2686 if (N1.getOpcode() == ISD::SHL) { 2687 SDValue N10 = N1.getOperand(0); 2688 if (isConstantOrConstantVector(N10, /*NoOpaques*/ true) && 2689 DAG.isKnownToBeAPowerOfTwo(N10)) { 2690 SDValue LogBase2 = BuildLogBase2(N10, DL); 2691 AddToWorklist(LogBase2.getNode()); 2692 2693 EVT ADDVT = N1.getOperand(1).getValueType(); 2694 SDValue Trunc = DAG.getZExtOrTrunc(LogBase2, DL, ADDVT); 2695 AddToWorklist(Trunc.getNode()); 2696 SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT, N1.getOperand(1), Trunc); 2697 AddToWorklist(Add.getNode()); 2698 return DAG.getNode(ISD::SRL, DL, VT, N0, Add); 2699 } 2700 } 2701 2702 // fold (udiv x, c) -> alternate 2703 AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes(); 2704 if (N1C && !TLI.isIntDivCheap(N->getValueType(0), Attr)) 2705 if (SDValue Op = 
BuildUDIV(N)) 2706 return Op; 2707 2708 // sdiv, srem -> sdivrem 2709 // If the divisor is constant, then return DIVREM only if isIntDivCheap() is 2710 // true. Otherwise, we break the simplification logic in visitREM(). 2711 if (!N1C || TLI.isIntDivCheap(N->getValueType(0), Attr)) 2712 if (SDValue DivRem = useDivRem(N)) 2713 return DivRem; 2714 2715 return SDValue(); 2716 } 2717 2718 // handles ISD::SREM and ISD::UREM 2719 SDValue DAGCombiner::visitREM(SDNode *N) { 2720 unsigned Opcode = N->getOpcode(); 2721 SDValue N0 = N->getOperand(0); 2722 SDValue N1 = N->getOperand(1); 2723 EVT VT = N->getValueType(0); 2724 bool isSigned = (Opcode == ISD::SREM); 2725 SDLoc DL(N); 2726 2727 // fold (rem c1, c2) -> c1%c2 2728 ConstantSDNode *N0C = isConstOrConstSplat(N0); 2729 ConstantSDNode *N1C = isConstOrConstSplat(N1); 2730 if (N0C && N1C) 2731 if (SDValue Folded = DAG.FoldConstantArithmetic(Opcode, DL, VT, N0C, N1C)) 2732 return Folded; 2733 2734 if (SDValue V = simplifyDivRem(N, DAG)) 2735 return V; 2736 2737 if (SDValue NewSel = foldBinOpIntoSelect(N)) 2738 return NewSel; 2739 2740 if (isSigned) { 2741 // If we know the sign bits of both operands are zero, strength reduce to a 2742 // urem instead. Handles (X & 0x0FFFFFFF) %s 16 -> X&15 2743 if (DAG.SignBitIsZero(N1) && DAG.SignBitIsZero(N0)) 2744 return DAG.getNode(ISD::UREM, DL, VT, N0, N1); 2745 } else { 2746 SDValue NegOne = DAG.getAllOnesConstant(DL, VT); 2747 if (DAG.isKnownToBeAPowerOfTwo(N1)) { 2748 // fold (urem x, pow2) -> (and x, pow2-1) 2749 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne); 2750 AddToWorklist(Add.getNode()); 2751 return DAG.getNode(ISD::AND, DL, VT, N0, Add); 2752 } 2753 if (N1.getOpcode() == ISD::SHL && 2754 DAG.isKnownToBeAPowerOfTwo(N1.getOperand(0))) { 2755 // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1)) 2756 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N1, NegOne); 2757 AddToWorklist(Add.getNode()); 2758 return DAG.getNode(ISD::AND, DL, VT, N0, Add); 2759 } 2760 } 2761 2762 AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes(); 2763 2764 // If X/C can be simplified by the division-by-constant logic, lower 2765 // X%C to the equivalent of X-X/C*C. 2766 // To avoid mangling nodes, this simplification requires that the combine() 2767 // call for the speculative DIV must not cause a DIVREM conversion. We guard 2768 // against this by skipping the simplification if isIntDivCheap(). When 2769 // div is not cheap, combine will not return a DIVREM. Regardless, 2770 // checking cheapness here makes sense since the simplification results in 2771 // fatter code. 2772 if (N1C && !N1C->isNullValue() && !TLI.isIntDivCheap(VT, Attr)) { 2773 unsigned DivOpcode = isSigned ? 
ISD::SDIV : ISD::UDIV; 2774 SDValue Div = DAG.getNode(DivOpcode, DL, VT, N0, N1); 2775 AddToWorklist(Div.getNode()); 2776 SDValue OptimizedDiv = combine(Div.getNode()); 2777 if (OptimizedDiv.getNode() && OptimizedDiv.getNode() != Div.getNode()) { 2778 assert((OptimizedDiv.getOpcode() != ISD::UDIVREM) && 2779 (OptimizedDiv.getOpcode() != ISD::SDIVREM)); 2780 SDValue Mul = DAG.getNode(ISD::MUL, DL, VT, OptimizedDiv, N1); 2781 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, N0, Mul); 2782 AddToWorklist(Mul.getNode()); 2783 return Sub; 2784 } 2785 } 2786 2787 // sdiv, srem -> sdivrem 2788 if (SDValue DivRem = useDivRem(N)) 2789 return DivRem.getValue(1); 2790 2791 return SDValue(); 2792 } 2793 2794 SDValue DAGCombiner::visitMULHS(SDNode *N) { 2795 SDValue N0 = N->getOperand(0); 2796 SDValue N1 = N->getOperand(1); 2797 EVT VT = N->getValueType(0); 2798 SDLoc DL(N); 2799 2800 // fold (mulhs x, 0) -> 0 2801 if (isNullConstant(N1)) 2802 return N1; 2803 // fold (mulhs x, 1) -> (sra x, size(x)-1) 2804 if (isOneConstant(N1)) { 2805 SDLoc DL(N); 2806 return DAG.getNode(ISD::SRA, DL, N0.getValueType(), N0, 2807 DAG.getConstant(N0.getValueSizeInBits() - 1, DL, 2808 getShiftAmountTy(N0.getValueType()))); 2809 } 2810 // fold (mulhs x, undef) -> 0 2811 if (N0.isUndef() || N1.isUndef()) 2812 return DAG.getConstant(0, SDLoc(N), VT); 2813 2814 // If the type twice as wide is legal, transform the mulhs to a wider multiply 2815 // plus a shift. 2816 if (VT.isSimple() && !VT.isVector()) { 2817 MVT Simple = VT.getSimpleVT(); 2818 unsigned SimpleSize = Simple.getSizeInBits(); 2819 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2820 if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2821 N0 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N0); 2822 N1 = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N1); 2823 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1); 2824 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1, 2825 DAG.getConstant(SimpleSize, DL, 2826 getShiftAmountTy(N1.getValueType()))); 2827 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1); 2828 } 2829 } 2830 2831 return SDValue(); 2832 } 2833 2834 SDValue DAGCombiner::visitMULHU(SDNode *N) { 2835 SDValue N0 = N->getOperand(0); 2836 SDValue N1 = N->getOperand(1); 2837 EVT VT = N->getValueType(0); 2838 SDLoc DL(N); 2839 2840 // fold (mulhu x, 0) -> 0 2841 if (isNullConstant(N1)) 2842 return N1; 2843 // fold (mulhu x, 1) -> 0 2844 if (isOneConstant(N1)) 2845 return DAG.getConstant(0, DL, N0.getValueType()); 2846 // fold (mulhu x, undef) -> 0 2847 if (N0.isUndef() || N1.isUndef()) 2848 return DAG.getConstant(0, DL, VT); 2849 2850 // If the type twice as wide is legal, transform the mulhu to a wider multiply 2851 // plus a shift. 2852 if (VT.isSimple() && !VT.isVector()) { 2853 MVT Simple = VT.getSimpleVT(); 2854 unsigned SimpleSize = Simple.getSizeInBits(); 2855 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2856 if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2857 N0 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N0); 2858 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N1); 2859 N1 = DAG.getNode(ISD::MUL, DL, NewVT, N0, N1); 2860 N1 = DAG.getNode(ISD::SRL, DL, NewVT, N1, 2861 DAG.getConstant(SimpleSize, DL, 2862 getShiftAmountTy(N1.getValueType()))); 2863 return DAG.getNode(ISD::TRUNCATE, DL, VT, N1); 2864 } 2865 } 2866 2867 return SDValue(); 2868 } 2869 2870 /// Perform optimizations common to nodes that compute two values. LoOp and HiOp 2871 /// give the opcodes for the two computations that are being performed. Return 2872 /// true if a simplification was made. 
SDValue DAGCombiner::SimplifyNodeWithTwoResults(SDNode *N, unsigned LoOp,
                                                unsigned HiOp) {
  // If the high half is not needed, just compute the low half.
  bool HiExists = N->hasAnyUseOfValue(1);
  if (!HiExists &&
      (!LegalOperations ||
       TLI.isOperationLegalOrCustom(LoOp, N->getValueType(0)))) {
    SDValue Res = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
    return CombineTo(N, Res, Res);
  }

  // If the low half is not needed, just compute the high half.
  bool LoExists = N->hasAnyUseOfValue(0);
  if (!LoExists &&
      (!LegalOperations ||
       TLI.isOperationLegal(HiOp, N->getValueType(1)))) {
    SDValue Res = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
    return CombineTo(N, Res, Res);
  }

  // If both halves are used, leave the node as it is.
  if (LoExists && HiExists)
    return SDValue();

  // If the two computed results can be simplified separately, separate them.
  if (LoExists) {
    SDValue Lo = DAG.getNode(LoOp, SDLoc(N), N->getValueType(0), N->ops());
    AddToWorklist(Lo.getNode());
    SDValue LoOpt = combine(Lo.getNode());
    if (LoOpt.getNode() && LoOpt.getNode() != Lo.getNode() &&
        (!LegalOperations ||
         TLI.isOperationLegal(LoOpt.getOpcode(), LoOpt.getValueType())))
      return CombineTo(N, LoOpt, LoOpt);
  }

  if (HiExists) {
    SDValue Hi = DAG.getNode(HiOp, SDLoc(N), N->getValueType(1), N->ops());
    AddToWorklist(Hi.getNode());
    SDValue HiOpt = combine(Hi.getNode());
    if (HiOpt.getNode() && HiOpt != Hi &&
        (!LegalOperations ||
         TLI.isOperationLegal(HiOpt.getOpcode(), HiOpt.getValueType())))
      return CombineTo(N, HiOpt, HiOpt);
  }

  return SDValue();
}

SDValue DAGCombiner::visitSMUL_LOHI(SDNode *N) {
  if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHS))
    return Res;

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  // If the type twice as wide is legal, transform this node into a wider
  // multiply plus a shift.
  if (VT.isSimple() && !VT.isVector()) {
    MVT Simple = VT.getSimpleVT();
    unsigned SimpleSize = Simple.getSizeInBits();
    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2);
    if (TLI.isOperationLegal(ISD::MUL, NewVT)) {
      SDValue Lo = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(0));
      SDValue Hi = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, N->getOperand(1));
      Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi);
      // Compute the high part as N1.
      Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo,
                       DAG.getConstant(SimpleSize, DL,
                                       getShiftAmountTy(Lo.getValueType())));
      Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi);
      // Compute the low part as N0.
      Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo);
      return CombineTo(N, Lo, Hi);
    }
  }

  return SDValue();
}

SDValue DAGCombiner::visitUMUL_LOHI(SDNode *N) {
  if (SDValue Res = SimplifyNodeWithTwoResults(N, ISD::MUL, ISD::MULHU))
    return Res;

  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  // If the type twice as wide is legal, transform this node into a wider
  // multiply plus a shift.
2961 if (VT.isSimple() && !VT.isVector()) { 2962 MVT Simple = VT.getSimpleVT(); 2963 unsigned SimpleSize = Simple.getSizeInBits(); 2964 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), SimpleSize*2); 2965 if (TLI.isOperationLegal(ISD::MUL, NewVT)) { 2966 SDValue Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(0)); 2967 SDValue Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, N->getOperand(1)); 2968 Lo = DAG.getNode(ISD::MUL, DL, NewVT, Lo, Hi); 2969 // Compute the high part as N1. 2970 Hi = DAG.getNode(ISD::SRL, DL, NewVT, Lo, 2971 DAG.getConstant(SimpleSize, DL, 2972 getShiftAmountTy(Lo.getValueType()))); 2973 Hi = DAG.getNode(ISD::TRUNCATE, DL, VT, Hi); 2974 // Compute the low part as N0. 2975 Lo = DAG.getNode(ISD::TRUNCATE, DL, VT, Lo); 2976 return CombineTo(N, Lo, Hi); 2977 } 2978 } 2979 2980 return SDValue(); 2981 } 2982 2983 SDValue DAGCombiner::visitSMULO(SDNode *N) { 2984 // (smulo x, 2) -> (saddo x, x) 2985 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1))) 2986 if (C2->getAPIntValue() == 2) 2987 return DAG.getNode(ISD::SADDO, SDLoc(N), N->getVTList(), 2988 N->getOperand(0), N->getOperand(0)); 2989 2990 return SDValue(); 2991 } 2992 2993 SDValue DAGCombiner::visitUMULO(SDNode *N) { 2994 // (umulo x, 2) -> (uaddo x, x) 2995 if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1))) 2996 if (C2->getAPIntValue() == 2) 2997 return DAG.getNode(ISD::UADDO, SDLoc(N), N->getVTList(), 2998 N->getOperand(0), N->getOperand(0)); 2999 3000 return SDValue(); 3001 } 3002 3003 SDValue DAGCombiner::visitIMINMAX(SDNode *N) { 3004 SDValue N0 = N->getOperand(0); 3005 SDValue N1 = N->getOperand(1); 3006 EVT VT = N0.getValueType(); 3007 3008 // fold vector ops 3009 if (VT.isVector()) 3010 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 3011 return FoldedVOp; 3012 3013 // fold (add c1, c2) -> c1+c2 3014 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 3015 ConstantSDNode *N1C = getAsNonOpaqueConstant(N1); 3016 if (N0C && N1C) 3017 return DAG.FoldConstantArithmetic(N->getOpcode(), SDLoc(N), VT, N0C, N1C); 3018 3019 // canonicalize constant to RHS 3020 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 3021 !DAG.isConstantIntBuildVectorOrConstantInt(N1)) 3022 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N1, N0); 3023 3024 return SDValue(); 3025 } 3026 3027 /// If this is a binary operator with two operands of the same opcode, try to 3028 /// simplify it. 3029 SDValue DAGCombiner::SimplifyBinOpWithSameOpcodeHands(SDNode *N) { 3030 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 3031 EVT VT = N0.getValueType(); 3032 assert(N0.getOpcode() == N1.getOpcode() && "Bad input!"); 3033 3034 // Bail early if none of these transforms apply. 3035 if (N0.getNumOperands() == 0) return SDValue(); 3036 3037 // For each of OP in AND/OR/XOR: 3038 // fold (OP (zext x), (zext y)) -> (zext (OP x, y)) 3039 // fold (OP (sext x), (sext y)) -> (sext (OP x, y)) 3040 // fold (OP (aext x), (aext y)) -> (aext (OP x, y)) 3041 // fold (OP (bswap x), (bswap y)) -> (bswap (OP x, y)) 3042 // fold (OP (trunc x), (trunc y)) -> (trunc (OP x, y)) (if trunc isn't free) 3043 // 3044 // do not sink logical op inside of a vector extend, since it may combine 3045 // into a vsetcc. 3046 EVT Op0VT = N0.getOperand(0).getValueType(); 3047 if ((N0.getOpcode() == ISD::ZERO_EXTEND || 3048 N0.getOpcode() == ISD::SIGN_EXTEND || 3049 N0.getOpcode() == ISD::BSWAP || 3050 // Avoid infinite looping with PromoteIntBinOp. 
       (N0.getOpcode() == ISD::ANY_EXTEND &&
        (!LegalTypes || TLI.isTypeDesirableForOp(N->getOpcode(), Op0VT))) ||
       (N0.getOpcode() == ISD::TRUNCATE &&
        (!TLI.isZExtFree(VT, Op0VT) ||
         !TLI.isTruncateFree(Op0VT, VT)) &&
        TLI.isTypeLegal(Op0VT))) &&
      !VT.isVector() &&
      Op0VT == N1.getOperand(0).getValueType() &&
      (!LegalOperations || TLI.isOperationLegal(N->getOpcode(), Op0VT))) {
    SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
                                 N0.getOperand(0).getValueType(),
                                 N0.getOperand(0), N1.getOperand(0));
    AddToWorklist(ORNode.getNode());
    return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, ORNode);
  }

  // For each of OP in SHL/SRL/SRA/AND...
  //   fold (and (OP x, z), (OP y, z)) -> (OP (and x, y), z)
  //   fold (or  (OP x, z), (OP y, z)) -> (OP (or  x, y), z)
  //   fold (xor (OP x, z), (OP y, z)) -> (OP (xor x, y), z)
  if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL ||
       N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::AND) &&
      N0.getOperand(1) == N1.getOperand(1)) {
    SDValue ORNode = DAG.getNode(N->getOpcode(), SDLoc(N0),
                                 N0.getOperand(0).getValueType(),
                                 N0.getOperand(0), N1.getOperand(0));
    AddToWorklist(ORNode.getNode());
    return DAG.getNode(N0.getOpcode(), SDLoc(N), VT,
                       ORNode, N0.getOperand(1));
  }

  // Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A,B))
  // Only perform this optimization up until type legalization, before
  // LegalizeVectorOps. LegalizeVectorOps promotes vector operations by
  // adding bitcasts. For example, (xor v4i32) is promoted to (xor v2i64), and
  // we don't want to undo this promotion.
  // We also handle SCALAR_TO_VECTOR because xor/or/and operations are cheaper
  // on scalars.
  if ((N0.getOpcode() == ISD::BITCAST ||
       N0.getOpcode() == ISD::SCALAR_TO_VECTOR) &&
      Level <= AfterLegalizeTypes) {
    SDValue In0 = N0.getOperand(0);
    SDValue In1 = N1.getOperand(0);
    EVT In0Ty = In0.getValueType();
    EVT In1Ty = In1.getValueType();
    SDLoc DL(N);
    // If both incoming values are integers and the original types are the
    // same.
    if (In0Ty.isInteger() && In1Ty.isInteger() && In0Ty == In1Ty) {
      SDValue Op = DAG.getNode(N->getOpcode(), DL, In0Ty, In0, In1);
      SDValue BC = DAG.getNode(N0.getOpcode(), DL, VT, Op);
      AddToWorklist(Op.getNode());
      return BC;
    }
  }

  // Xor/and/or are indifferent to the swizzle operation (shuffle of one value).
  // Simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A,B))
  // If both shuffles use the same mask, and both shuffle within a single
  // vector, then it is worthwhile to move the swizzle after the operation.
  // The type-legalizer generates this pattern when loading illegal
  // vector types from memory. In many cases this allows additional shuffle
  // optimizations.
  // There are other cases where moving the shuffle after the xor/and/or
  // is profitable even if shuffles don't perform a swizzle.
  // If both shuffles use the same mask, and both shuffles have the same first
  // or second operand, then it might still be profitable to move the shuffle
  // after the xor/and/or operation.
3119 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG) { 3120 ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(N0); 3121 ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(N1); 3122 3123 assert(N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType() && 3124 "Inputs to shuffles are not the same type"); 3125 3126 // Check that both shuffles use the same mask. The masks are known to be of 3127 // the same length because the result vector type is the same. 3128 // Check also that shuffles have only one use to avoid introducing extra 3129 // instructions. 3130 if (SVN0->hasOneUse() && SVN1->hasOneUse() && 3131 SVN0->getMask().equals(SVN1->getMask())) { 3132 SDValue ShOp = N0->getOperand(1); 3133 3134 // Don't try to fold this node if it requires introducing a 3135 // build vector of all zeros that might be illegal at this stage. 3136 if (N->getOpcode() == ISD::XOR && !ShOp.isUndef()) { 3137 if (!LegalTypes) 3138 ShOp = DAG.getConstant(0, SDLoc(N), VT); 3139 else 3140 ShOp = SDValue(); 3141 } 3142 3143 // (AND (shuf (A, C), shuf (B, C)) -> shuf (AND (A, B), C) 3144 // (OR (shuf (A, C), shuf (B, C)) -> shuf (OR (A, B), C) 3145 // (XOR (shuf (A, C), shuf (B, C)) -> shuf (XOR (A, B), V_0) 3146 if (N0.getOperand(1) == N1.getOperand(1) && ShOp.getNode()) { 3147 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 3148 N0->getOperand(0), N1->getOperand(0)); 3149 AddToWorklist(NewNode.getNode()); 3150 return DAG.getVectorShuffle(VT, SDLoc(N), NewNode, ShOp, 3151 SVN0->getMask()); 3152 } 3153 3154 // Don't try to fold this node if it requires introducing a 3155 // build vector of all zeros that might be illegal at this stage. 3156 ShOp = N0->getOperand(0); 3157 if (N->getOpcode() == ISD::XOR && !ShOp.isUndef()) { 3158 if (!LegalTypes) 3159 ShOp = DAG.getConstant(0, SDLoc(N), VT); 3160 else 3161 ShOp = SDValue(); 3162 } 3163 3164 // (AND (shuf (C, A), shuf (C, B)) -> shuf (C, AND (A, B)) 3165 // (OR (shuf (C, A), shuf (C, B)) -> shuf (C, OR (A, B)) 3166 // (XOR (shuf (C, A), shuf (C, B)) -> shuf (V_0, XOR (A, B)) 3167 if (N0->getOperand(0) == N1->getOperand(0) && ShOp.getNode()) { 3168 SDValue NewNode = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 3169 N0->getOperand(1), N1->getOperand(1)); 3170 AddToWorklist(NewNode.getNode()); 3171 return DAG.getVectorShuffle(VT, SDLoc(N), ShOp, NewNode, 3172 SVN0->getMask()); 3173 } 3174 } 3175 } 3176 3177 return SDValue(); 3178 } 3179 3180 /// Try to make (and/or setcc (LL, LR), setcc (RL, RR)) more efficient. 3181 SDValue DAGCombiner::foldLogicOfSetCCs(bool IsAnd, SDValue N0, SDValue N1, 3182 const SDLoc &DL) { 3183 SDValue LL, LR, RL, RR, N0CC, N1CC; 3184 if (!isSetCCEquivalent(N0, LL, LR, N0CC) || 3185 !isSetCCEquivalent(N1, RL, RR, N1CC)) 3186 return SDValue(); 3187 3188 assert(N0.getValueType() == N1.getValueType() && 3189 "Unexpected operand types for bitwise logic op"); 3190 assert(LL.getValueType() == LR.getValueType() && 3191 RL.getValueType() == RR.getValueType() && 3192 "Unexpected operand types for setcc"); 3193 3194 // If we're here post-legalization or the logic op type is not i1, the logic 3195 // op type must match a setcc result type. Also, all folds require new 3196 // operations on the left and right operands, so those types must match. 
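// For example (hypothetical target behaviour): if the compare operands are
// v4i32 but the target's setcc result type for v4i32 is v4i1, a logic op of
// any type other than v4i1 fails the check below and we give up rather than
// rebuild a setcc with a result type the target does not expect.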
3197 EVT VT = N0.getValueType(); 3198 EVT OpVT = LL.getValueType(); 3199 if (LegalOperations || VT != MVT::i1) 3200 if (VT != getSetCCResultType(OpVT)) 3201 return SDValue(); 3202 if (OpVT != RL.getValueType()) 3203 return SDValue(); 3204 3205 ISD::CondCode CC0 = cast<CondCodeSDNode>(N0CC)->get(); 3206 ISD::CondCode CC1 = cast<CondCodeSDNode>(N1CC)->get(); 3207 bool IsInteger = OpVT.isInteger(); 3208 if (LR == RR && CC0 == CC1 && IsInteger) { 3209 bool IsZero = isNullConstantOrNullSplatConstant(LR); 3210 bool IsNeg1 = isAllOnesConstantOrAllOnesSplatConstant(LR); 3211 3212 // All bits clear? 3213 bool AndEqZero = IsAnd && CC1 == ISD::SETEQ && IsZero; 3214 // All sign bits clear? 3215 bool AndGtNeg1 = IsAnd && CC1 == ISD::SETGT && IsNeg1; 3216 // Any bits set? 3217 bool OrNeZero = !IsAnd && CC1 == ISD::SETNE && IsZero; 3218 // Any sign bits set? 3219 bool OrLtZero = !IsAnd && CC1 == ISD::SETLT && IsZero; 3220 3221 // (and (seteq X, 0), (seteq Y, 0)) --> (seteq (or X, Y), 0) 3222 // (and (setgt X, -1), (setgt Y, -1)) --> (setgt (or X, Y), -1) 3223 // (or (setne X, 0), (setne Y, 0)) --> (setne (or X, Y), 0) 3224 // (or (setlt X, 0), (setlt Y, 0)) --> (setlt (or X, Y), 0) 3225 if (AndEqZero || AndGtNeg1 || OrNeZero || OrLtZero) { 3226 SDValue Or = DAG.getNode(ISD::OR, SDLoc(N0), OpVT, LL, RL); 3227 AddToWorklist(Or.getNode()); 3228 return DAG.getSetCC(DL, VT, Or, LR, CC1); 3229 } 3230 3231 // All bits set? 3232 bool AndEqNeg1 = IsAnd && CC1 == ISD::SETEQ && IsNeg1; 3233 // All sign bits set? 3234 bool AndLtZero = IsAnd && CC1 == ISD::SETLT && IsZero; 3235 // Any bits clear? 3236 bool OrNeNeg1 = !IsAnd && CC1 == ISD::SETNE && IsNeg1; 3237 // Any sign bits clear? 3238 bool OrGtNeg1 = !IsAnd && CC1 == ISD::SETGT && IsNeg1; 3239 3240 // (and (seteq X, -1), (seteq Y, -1)) --> (seteq (and X, Y), -1) 3241 // (and (setlt X, 0), (setlt Y, 0)) --> (setlt (and X, Y), 0) 3242 // (or (setne X, -1), (setne Y, -1)) --> (setne (and X, Y), -1) 3243 // (or (setgt X, -1), (setgt Y -1)) --> (setgt (and X, Y), -1) 3244 if (AndEqNeg1 || AndLtZero || OrNeNeg1 || OrGtNeg1) { 3245 SDValue And = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, LL, RL); 3246 AddToWorklist(And.getNode()); 3247 return DAG.getSetCC(DL, VT, And, LR, CC1); 3248 } 3249 } 3250 3251 // TODO: What is the 'or' equivalent of this fold? 3252 // (and (setne X, 0), (setne X, -1)) --> (setuge (add X, 1), 2) 3253 if (IsAnd && LL == RL && CC0 == CC1 && IsInteger && CC0 == ISD::SETNE && 3254 ((isNullConstant(LR) && isAllOnesConstant(RR)) || 3255 (isAllOnesConstant(LR) && isNullConstant(RR)))) { 3256 SDValue One = DAG.getConstant(1, DL, OpVT); 3257 SDValue Two = DAG.getConstant(2, DL, OpVT); 3258 SDValue Add = DAG.getNode(ISD::ADD, SDLoc(N0), OpVT, LL, One); 3259 AddToWorklist(Add.getNode()); 3260 return DAG.getSetCC(DL, VT, Add, Two, ISD::SETUGE); 3261 } 3262 3263 // Try more general transforms if the predicates match and the only user of 3264 // the compares is the 'and' or 'or'. 
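// The reasoning behind the transforms below: A == B exactly when (xor A, B)
// is zero, and A != B exactly when it is non-zero, so
//   (and (seteq A, B), (seteq C, D)) <=> seteq (or (xor A, B), (xor C, D)), 0
//   (or  (setne A, B), (setne C, D)) <=> setne (or (xor A, B), (xor C, D)), 0
// i.e. two compares plus a logic op become a single compare against zero.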
3265 if (IsInteger && TLI.convertSetCCLogicToBitwiseLogic(OpVT) && CC0 == CC1 && 3266 N0.hasOneUse() && N1.hasOneUse()) { 3267 // and (seteq A, B), (seteq C, D) --> seteq (or (xor A, B), (xor C, D)), 0 3268 // or (setne A, B), (setne C, D) --> setne (or (xor A, B), (xor C, D)), 0 3269 if ((IsAnd && CC1 == ISD::SETEQ) || (!IsAnd && CC1 == ISD::SETNE)) { 3270 SDValue XorL = DAG.getNode(ISD::XOR, SDLoc(N0), OpVT, LL, LR); 3271 SDValue XorR = DAG.getNode(ISD::XOR, SDLoc(N1), OpVT, RL, RR); 3272 SDValue Or = DAG.getNode(ISD::OR, DL, OpVT, XorL, XorR); 3273 SDValue Zero = DAG.getConstant(0, DL, OpVT); 3274 return DAG.getSetCC(DL, VT, Or, Zero, CC1); 3275 } 3276 } 3277 3278 // Canonicalize equivalent operands to LL == RL. 3279 if (LL == RR && LR == RL) { 3280 CC1 = ISD::getSetCCSwappedOperands(CC1); 3281 std::swap(RL, RR); 3282 } 3283 3284 // (and (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC) 3285 // (or (setcc X, Y, CC0), (setcc X, Y, CC1)) --> (setcc X, Y, NewCC) 3286 if (LL == RL && LR == RR) { 3287 ISD::CondCode NewCC = IsAnd ? ISD::getSetCCAndOperation(CC0, CC1, IsInteger) 3288 : ISD::getSetCCOrOperation(CC0, CC1, IsInteger); 3289 if (NewCC != ISD::SETCC_INVALID && 3290 (!LegalOperations || 3291 (TLI.isCondCodeLegal(NewCC, LL.getSimpleValueType()) && 3292 TLI.isOperationLegal(ISD::SETCC, OpVT)))) 3293 return DAG.getSetCC(DL, VT, LL, LR, NewCC); 3294 } 3295 3296 return SDValue(); 3297 } 3298 3299 /// This contains all DAGCombine rules which reduce two values combined by 3300 /// an And operation to a single value. This makes them reusable in the context 3301 /// of visitSELECT(). Rules involving constants are not included as 3302 /// visitSELECT() already handles those cases. 3303 SDValue DAGCombiner::visitANDLike(SDValue N0, SDValue N1, SDNode *N) { 3304 EVT VT = N1.getValueType(); 3305 SDLoc DL(N); 3306 3307 // fold (and x, undef) -> 0 3308 if (N0.isUndef() || N1.isUndef()) 3309 return DAG.getConstant(0, DL, VT); 3310 3311 if (SDValue V = foldLogicOfSetCCs(true, N0, N1, DL)) 3312 return V; 3313 3314 if (N0.getOpcode() == ISD::ADD && N1.getOpcode() == ISD::SRL && 3315 VT.getSizeInBits() <= 64) { 3316 if (ConstantSDNode *ADDI = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 3317 APInt ADDC = ADDI->getAPIntValue(); 3318 if (!TLI.isLegalAddImmediate(ADDC.getSExtValue())) { 3319 // Look for (and (add x, c1), (lshr y, c2)). If C1 wasn't a legal 3320 // immediate for an add, but it is legal if its top c2 bits are set, 3321 // transform the ADD so the immediate doesn't need to be materialized 3322 // in a register. 3323 if (ConstantSDNode *SRLI = dyn_cast<ConstantSDNode>(N1.getOperand(1))) { 3324 APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 3325 SRLI->getZExtValue()); 3326 if (DAG.MaskedValueIsZero(N0.getOperand(1), Mask)) { 3327 ADDC |= Mask; 3328 if (TLI.isLegalAddImmediate(ADDC.getSExtValue())) { 3329 SDLoc DL0(N0); 3330 SDValue NewAdd = 3331 DAG.getNode(ISD::ADD, DL0, VT, 3332 N0.getOperand(0), DAG.getConstant(ADDC, DL, VT)); 3333 CombineTo(N0.getNode(), NewAdd); 3334 // Return N so it doesn't get rechecked! 3335 return SDValue(N, 0); 3336 } 3337 } 3338 } 3339 } 3340 } 3341 } 3342 3343 // Reduce bit extract of low half of an integer to the narrower type. 
3344 // (and (srl i64:x, K), KMask) -> 3345 // (i64 zero_extend (and (srl (i32 (trunc i64:x)), K)), KMask) 3346 if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) { 3347 if (ConstantSDNode *CAnd = dyn_cast<ConstantSDNode>(N1)) { 3348 if (ConstantSDNode *CShift = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 3349 unsigned Size = VT.getSizeInBits(); 3350 const APInt &AndMask = CAnd->getAPIntValue(); 3351 unsigned ShiftBits = CShift->getZExtValue(); 3352 3353 // Bail out, this node will probably disappear anyway. 3354 if (ShiftBits == 0) 3355 return SDValue(); 3356 3357 unsigned MaskBits = AndMask.countTrailingOnes(); 3358 EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), Size / 2); 3359 3360 if (AndMask.isMask() && 3361 // Required bits must not span the two halves of the integer and 3362 // must fit in the half size type. 3363 (ShiftBits + MaskBits <= Size / 2) && 3364 TLI.isNarrowingProfitable(VT, HalfVT) && 3365 TLI.isTypeDesirableForOp(ISD::AND, HalfVT) && 3366 TLI.isTypeDesirableForOp(ISD::SRL, HalfVT) && 3367 TLI.isTruncateFree(VT, HalfVT) && 3368 TLI.isZExtFree(HalfVT, VT)) { 3369 // The isNarrowingProfitable is to avoid regressions on PPC and 3370 // AArch64 which match a few 64-bit bit insert / bit extract patterns 3371 // on downstream users of this. Those patterns could probably be 3372 // extended to handle extensions mixed in. 3373 3374 SDValue SL(N0); 3375 assert(MaskBits <= Size); 3376 3377 // Extracting the highest bit of the low half. 3378 EVT ShiftVT = TLI.getShiftAmountTy(HalfVT, DAG.getDataLayout()); 3379 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, HalfVT, 3380 N0.getOperand(0)); 3381 3382 SDValue NewMask = DAG.getConstant(AndMask.trunc(Size / 2), SL, HalfVT); 3383 SDValue ShiftK = DAG.getConstant(ShiftBits, SL, ShiftVT); 3384 SDValue Shift = DAG.getNode(ISD::SRL, SL, HalfVT, Trunc, ShiftK); 3385 SDValue And = DAG.getNode(ISD::AND, SL, HalfVT, Shift, NewMask); 3386 return DAG.getNode(ISD::ZERO_EXTEND, SL, VT, And); 3387 } 3388 } 3389 } 3390 } 3391 3392 return SDValue(); 3393 } 3394 3395 bool DAGCombiner::isAndLoadExtLoad(ConstantSDNode *AndC, LoadSDNode *LoadN, 3396 EVT LoadResultTy, EVT &ExtVT, EVT &LoadedVT, 3397 bool &NarrowLoad) { 3398 uint32_t ActiveBits = AndC->getAPIntValue().getActiveBits(); 3399 3400 if (ActiveBits == 0 || !AndC->getAPIntValue().isMask(ActiveBits)) 3401 return false; 3402 3403 ExtVT = EVT::getIntegerVT(*DAG.getContext(), ActiveBits); 3404 LoadedVT = LoadN->getMemoryVT(); 3405 3406 if (ExtVT == LoadedVT && 3407 (!LegalOperations || 3408 TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT))) { 3409 // ZEXTLOAD will match without needing to change the size of the value being 3410 // loaded. 3411 NarrowLoad = false; 3412 return true; 3413 } 3414 3415 // Do not change the width of a volatile load. 3416 if (LoadN->isVolatile()) 3417 return false; 3418 3419 // Do not generate loads of non-round integer types since these can 3420 // be expensive (and would be wrong if the type is not byte sized). 
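// For example, masking an i32 load with 0xFFFFFF asks for an i24 zero-extending
// load: i24 is a whole number of bytes but not a power of two in size, so it is
// not "round" and we reject it here rather than emit an awkward odd-sized load.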
3421 if (!LoadedVT.bitsGT(ExtVT) || !ExtVT.isRound()) 3422 return false; 3423 3424 if (LegalOperations && 3425 !TLI.isLoadExtLegal(ISD::ZEXTLOAD, LoadResultTy, ExtVT)) 3426 return false; 3427 3428 if (!TLI.shouldReduceLoadWidth(LoadN, ISD::ZEXTLOAD, ExtVT)) 3429 return false; 3430 3431 NarrowLoad = true; 3432 return true; 3433 } 3434 3435 SDValue DAGCombiner::visitAND(SDNode *N) { 3436 SDValue N0 = N->getOperand(0); 3437 SDValue N1 = N->getOperand(1); 3438 EVT VT = N1.getValueType(); 3439 3440 // x & x --> x 3441 if (N0 == N1) 3442 return N0; 3443 3444 // fold vector ops 3445 if (VT.isVector()) { 3446 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 3447 return FoldedVOp; 3448 3449 // fold (and x, 0) -> 0, vector edition 3450 if (ISD::isBuildVectorAllZeros(N0.getNode())) 3451 // do not return N0, because undef node may exist in N0 3452 return DAG.getConstant(APInt::getNullValue(N0.getScalarValueSizeInBits()), 3453 SDLoc(N), N0.getValueType()); 3454 if (ISD::isBuildVectorAllZeros(N1.getNode())) 3455 // do not return N1, because undef node may exist in N1 3456 return DAG.getConstant(APInt::getNullValue(N1.getScalarValueSizeInBits()), 3457 SDLoc(N), N1.getValueType()); 3458 3459 // fold (and x, -1) -> x, vector edition 3460 if (ISD::isBuildVectorAllOnes(N0.getNode())) 3461 return N1; 3462 if (ISD::isBuildVectorAllOnes(N1.getNode())) 3463 return N0; 3464 } 3465 3466 // fold (and c1, c2) -> c1&c2 3467 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 3468 ConstantSDNode *N1C = isConstOrConstSplat(N1); 3469 if (N0C && N1C && !N1C->isOpaque()) 3470 return DAG.FoldConstantArithmetic(ISD::AND, SDLoc(N), VT, N0C, N1C); 3471 // canonicalize constant to RHS 3472 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 3473 !DAG.isConstantIntBuildVectorOrConstantInt(N1)) 3474 return DAG.getNode(ISD::AND, SDLoc(N), VT, N1, N0); 3475 // fold (and x, -1) -> x 3476 if (isAllOnesConstant(N1)) 3477 return N0; 3478 // if (and x, c) is known to be zero, return 0 3479 unsigned BitWidth = VT.getScalarSizeInBits(); 3480 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0), 3481 APInt::getAllOnesValue(BitWidth))) 3482 return DAG.getConstant(0, SDLoc(N), VT); 3483 3484 if (SDValue NewSel = foldBinOpIntoSelect(N)) 3485 return NewSel; 3486 3487 // reassociate and 3488 if (SDValue RAND = ReassociateOps(ISD::AND, SDLoc(N), N0, N1)) 3489 return RAND; 3490 // fold (and (or x, C), D) -> D if (C & D) == D 3491 if (N1C && N0.getOpcode() == ISD::OR) 3492 if (ConstantSDNode *ORI = isConstOrConstSplat(N0.getOperand(1))) 3493 if ((ORI->getAPIntValue() & N1C->getAPIntValue()) == N1C->getAPIntValue()) 3494 return N1; 3495 // fold (and (any_ext V), c) -> (zero_ext V) if 'and' only clears top bits. 3496 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { 3497 SDValue N0Op0 = N0.getOperand(0); 3498 APInt Mask = ~N1C->getAPIntValue(); 3499 Mask = Mask.trunc(N0Op0.getScalarValueSizeInBits()); 3500 if (DAG.MaskedValueIsZero(N0Op0, Mask)) { 3501 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), 3502 N0.getValueType(), N0Op0); 3503 3504 // Replace uses of the AND with uses of the Zero extend node. 3505 CombineTo(N, Zext); 3506 3507 // We actually want to replace all uses of the any_extend with the 3508 // zero_extend, to avoid duplicating things. This will later cause this 3509 // AND to be folded. 3510 CombineTo(N0.getNode(), Zext); 3511 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
3512 } 3513 } 3514 // similarly fold (and (X (load ([non_ext|any_ext|zero_ext] V))), c) -> 3515 // (X (load ([non_ext|zero_ext] V))) if 'and' only clears top bits which must 3516 // already be zero by virtue of the width of the base type of the load. 3517 // 3518 // the 'X' node here can either be nothing or an extract_vector_elt to catch 3519 // more cases. 3520 if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 3521 N0.getValueSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits() && 3522 N0.getOperand(0).getOpcode() == ISD::LOAD && 3523 N0.getOperand(0).getResNo() == 0) || 3524 (N0.getOpcode() == ISD::LOAD && N0.getResNo() == 0)) { 3525 LoadSDNode *Load = cast<LoadSDNode>( (N0.getOpcode() == ISD::LOAD) ? 3526 N0 : N0.getOperand(0) ); 3527 3528 // Get the constant (if applicable) the zero'th operand is being ANDed with. 3529 // This can be a pure constant or a vector splat, in which case we treat the 3530 // vector as a scalar and use the splat value. 3531 APInt Constant = APInt::getNullValue(1); 3532 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 3533 Constant = C->getAPIntValue(); 3534 } else if (BuildVectorSDNode *Vector = dyn_cast<BuildVectorSDNode>(N1)) { 3535 APInt SplatValue, SplatUndef; 3536 unsigned SplatBitSize; 3537 bool HasAnyUndefs; 3538 bool IsSplat = Vector->isConstantSplat(SplatValue, SplatUndef, 3539 SplatBitSize, HasAnyUndefs); 3540 if (IsSplat) { 3541 // Undef bits can contribute to a possible optimisation if set, so 3542 // set them. 3543 SplatValue |= SplatUndef; 3544 3545 // The splat value may be something like "0x00FFFFFF", which means 0 for 3546 // the first vector value and FF for the rest, repeating. We need a mask 3547 // that will apply equally to all members of the vector, so AND all the 3548 // lanes of the constant together. 3549 EVT VT = Vector->getValueType(0); 3550 unsigned BitWidth = VT.getScalarSizeInBits(); 3551 3552 // If the splat value has been compressed to a bitlength lower 3553 // than the size of the vector lane, we need to re-expand it to 3554 // the lane size. 3555 if (BitWidth > SplatBitSize) 3556 for (SplatValue = SplatValue.zextOrTrunc(BitWidth); 3557 SplatBitSize < BitWidth; 3558 SplatBitSize = SplatBitSize * 2) 3559 SplatValue |= SplatValue.shl(SplatBitSize); 3560 3561 // Make sure that variable 'Constant' is only set if 'SplatBitSize' is a 3562 // multiple of 'BitWidth'. Otherwise, we could propagate a wrong value. 3563 if (SplatBitSize % BitWidth == 0) { 3564 Constant = APInt::getAllOnesValue(BitWidth); 3565 for (unsigned i = 0, n = SplatBitSize/BitWidth; i < n; ++i) 3566 Constant &= SplatValue.lshr(i*BitWidth).zextOrTrunc(BitWidth); 3567 } 3568 } 3569 } 3570 3571 // If we want to change an EXTLOAD to a ZEXTLOAD, ensure a ZEXTLOAD is 3572 // actually legal and isn't going to get expanded, else this is a false 3573 // optimisation. 3574 bool CanZextLoadProfitably = TLI.isLoadExtLegal(ISD::ZEXTLOAD, 3575 Load->getValueType(0), 3576 Load->getMemoryVT()); 3577 3578 // Resize the constant to the same size as the original memory access before 3579 // extension. If it is still the AllOnesValue then this AND is completely 3580 // unneeded. 
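// Illustrative case: for (and (zextload i8 -> i32 %p), 0x000000FF) the mask
// truncated to the 8-bit memory type is 0xFF, i.e. all ones, so the zextload
// already produces the masked value and the AND can simply be dropped below.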
3581 Constant = Constant.zextOrTrunc(Load->getMemoryVT().getScalarSizeInBits()); 3582 3583 bool B; 3584 switch (Load->getExtensionType()) { 3585 default: B = false; break; 3586 case ISD::EXTLOAD: B = CanZextLoadProfitably; break; 3587 case ISD::ZEXTLOAD: 3588 case ISD::NON_EXTLOAD: B = true; break; 3589 } 3590 3591 if (B && Constant.isAllOnesValue()) { 3592 // If the load type was an EXTLOAD, convert to ZEXTLOAD in order to 3593 // preserve semantics once we get rid of the AND. 3594 SDValue NewLoad(Load, 0); 3595 3596 // Fold the AND away. NewLoad may get replaced immediately. 3597 CombineTo(N, (N0.getNode() == Load) ? NewLoad : N0); 3598 3599 if (Load->getExtensionType() == ISD::EXTLOAD) { 3600 NewLoad = DAG.getLoad(Load->getAddressingMode(), ISD::ZEXTLOAD, 3601 Load->getValueType(0), SDLoc(Load), 3602 Load->getChain(), Load->getBasePtr(), 3603 Load->getOffset(), Load->getMemoryVT(), 3604 Load->getMemOperand()); 3605 // Replace uses of the EXTLOAD with the new ZEXTLOAD. 3606 if (Load->getNumValues() == 3) { 3607 // PRE/POST_INC loads have 3 values. 3608 SDValue To[] = { NewLoad.getValue(0), NewLoad.getValue(1), 3609 NewLoad.getValue(2) }; 3610 CombineTo(Load, To, 3, true); 3611 } else { 3612 CombineTo(Load, NewLoad.getValue(0), NewLoad.getValue(1)); 3613 } 3614 } 3615 3616 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3617 } 3618 } 3619 3620 // fold (and (load x), 255) -> (zextload x, i8) 3621 // fold (and (extload x, i16), 255) -> (zextload x, i8) 3622 // fold (and (any_ext (extload x, i16)), 255) -> (zextload x, i8) 3623 if (!VT.isVector() && N1C && (N0.getOpcode() == ISD::LOAD || 3624 (N0.getOpcode() == ISD::ANY_EXTEND && 3625 N0.getOperand(0).getOpcode() == ISD::LOAD))) { 3626 bool HasAnyExt = N0.getOpcode() == ISD::ANY_EXTEND; 3627 LoadSDNode *LN0 = HasAnyExt 3628 ? cast<LoadSDNode>(N0.getOperand(0)) 3629 : cast<LoadSDNode>(N0); 3630 if (LN0->getExtensionType() != ISD::SEXTLOAD && 3631 LN0->isUnindexed() && N0.hasOneUse() && SDValue(LN0, 0).hasOneUse()) { 3632 auto NarrowLoad = false; 3633 EVT LoadResultTy = HasAnyExt ? LN0->getValueType(0) : VT; 3634 EVT ExtVT, LoadedVT; 3635 if (isAndLoadExtLoad(N1C, LN0, LoadResultTy, ExtVT, LoadedVT, 3636 NarrowLoad)) { 3637 if (!NarrowLoad) { 3638 SDValue NewLoad = 3639 DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy, 3640 LN0->getChain(), LN0->getBasePtr(), ExtVT, 3641 LN0->getMemOperand()); 3642 AddToWorklist(N); 3643 CombineTo(LN0, NewLoad, NewLoad.getValue(1)); 3644 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3645 } else { 3646 EVT PtrType = LN0->getOperand(1).getValueType(); 3647 3648 unsigned Alignment = LN0->getAlignment(); 3649 SDValue NewPtr = LN0->getBasePtr(); 3650 3651 // For big endian targets, we need to add an offset to the pointer 3652 // to load the correct bytes. For little endian systems, we merely 3653 // need to read fewer bytes from the same pointer. 
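// Worked example: narrowing an i32 load to an i8 zextload of its low byte.
// On a little-endian target the low byte already sits at the original address,
// so NewPtr is unchanged; on a big-endian target it is the last byte, so we
// bump the pointer by PtrOff = 4 - 1 = 3 and fold that offset into the
// alignment computation.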
3654 if (DAG.getDataLayout().isBigEndian()) { 3655 unsigned LVTStoreBytes = LoadedVT.getStoreSize(); 3656 unsigned EVTStoreBytes = ExtVT.getStoreSize(); 3657 unsigned PtrOff = LVTStoreBytes - EVTStoreBytes; 3658 SDLoc DL(LN0); 3659 NewPtr = DAG.getNode(ISD::ADD, DL, PtrType, 3660 NewPtr, DAG.getConstant(PtrOff, DL, PtrType)); 3661 Alignment = MinAlign(Alignment, PtrOff); 3662 } 3663 3664 AddToWorklist(NewPtr.getNode()); 3665 3666 SDValue Load = DAG.getExtLoad( 3667 ISD::ZEXTLOAD, SDLoc(LN0), LoadResultTy, LN0->getChain(), NewPtr, 3668 LN0->getPointerInfo(), ExtVT, Alignment, 3669 LN0->getMemOperand()->getFlags(), LN0->getAAInfo()); 3670 AddToWorklist(N); 3671 CombineTo(LN0, Load, Load.getValue(1)); 3672 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3673 } 3674 } 3675 } 3676 } 3677 3678 if (SDValue Combined = visitANDLike(N0, N1, N)) 3679 return Combined; 3680 3681 // Simplify: (and (op x...), (op y...)) -> (op (and x, y)) 3682 if (N0.getOpcode() == N1.getOpcode()) 3683 if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N)) 3684 return Tmp; 3685 3686 // Masking the negated extension of a boolean is just the zero-extended 3687 // boolean: 3688 // and (sub 0, zext(bool X)), 1 --> zext(bool X) 3689 // and (sub 0, sext(bool X)), 1 --> zext(bool X) 3690 // 3691 // Note: the SimplifyDemandedBits fold below can make an information-losing 3692 // transform, and then we have no way to find this better fold. 3693 if (N1C && N1C->isOne() && N0.getOpcode() == ISD::SUB) { 3694 ConstantSDNode *SubLHS = isConstOrConstSplat(N0.getOperand(0)); 3695 SDValue SubRHS = N0.getOperand(1); 3696 if (SubLHS && SubLHS->isNullValue()) { 3697 if (SubRHS.getOpcode() == ISD::ZERO_EXTEND && 3698 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1) 3699 return SubRHS; 3700 if (SubRHS.getOpcode() == ISD::SIGN_EXTEND && 3701 SubRHS.getOperand(0).getScalarValueSizeInBits() == 1) 3702 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, SubRHS.getOperand(0)); 3703 } 3704 } 3705 3706 // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1) 3707 // fold (and (sra)) -> (and (srl)) when possible. 3708 if (!VT.isVector() && SimplifyDemandedBits(SDValue(N, 0))) 3709 return SDValue(N, 0); 3710 3711 // fold (zext_inreg (extload x)) -> (zextload x) 3712 if (ISD::isEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode())) { 3713 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 3714 EVT MemVT = LN0->getMemoryVT(); 3715 // If we zero all the possible extended bits, then we can turn this into 3716 // a zextload if we are running before legalize or the operation is legal. 3717 unsigned BitWidth = N1.getScalarValueSizeInBits(); 3718 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 3719 BitWidth - MemVT.getScalarSizeInBits())) && 3720 ((!LegalOperations && !LN0->isVolatile()) || 3721 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) { 3722 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, 3723 LN0->getChain(), LN0->getBasePtr(), 3724 MemVT, LN0->getMemOperand()); 3725 AddToWorklist(N); 3726 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 3727 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
3728 } 3729 } 3730 // fold (zext_inreg (sextload x)) -> (zextload x) iff load has one use 3731 if (ISD::isSEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 3732 N0.hasOneUse()) { 3733 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 3734 EVT MemVT = LN0->getMemoryVT(); 3735 // If we zero all the possible extended bits, then we can turn this into 3736 // a zextload if we are running before legalize or the operation is legal. 3737 unsigned BitWidth = N1.getScalarValueSizeInBits(); 3738 if (DAG.MaskedValueIsZero(N1, APInt::getHighBitsSet(BitWidth, 3739 BitWidth - MemVT.getScalarSizeInBits())) && 3740 ((!LegalOperations && !LN0->isVolatile()) || 3741 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT))) { 3742 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N0), VT, 3743 LN0->getChain(), LN0->getBasePtr(), 3744 MemVT, LN0->getMemOperand()); 3745 AddToWorklist(N); 3746 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 3747 return SDValue(N, 0); // Return N so it doesn't get rechecked! 3748 } 3749 } 3750 // fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const) 3751 if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) { 3752 if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0), 3753 N0.getOperand(1), false)) 3754 return BSwap; 3755 } 3756 3757 return SDValue(); 3758 } 3759 3760 /// Match (a >> 8) | (a << 8) as (bswap a) >> 16. 3761 SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1, 3762 bool DemandHighBits) { 3763 if (!LegalOperations) 3764 return SDValue(); 3765 3766 EVT VT = N->getValueType(0); 3767 if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16) 3768 return SDValue(); 3769 if (!TLI.isOperationLegal(ISD::BSWAP, VT)) 3770 return SDValue(); 3771 3772 // Recognize (and (shl a, 8), 0xff), (and (srl a, 8), 0xff00) 3773 bool LookPassAnd0 = false; 3774 bool LookPassAnd1 = false; 3775 if (N0.getOpcode() == ISD::AND && N0.getOperand(0).getOpcode() == ISD::SRL) 3776 std::swap(N0, N1); 3777 if (N1.getOpcode() == ISD::AND && N1.getOperand(0).getOpcode() == ISD::SHL) 3778 std::swap(N0, N1); 3779 if (N0.getOpcode() == ISD::AND) { 3780 if (!N0.getNode()->hasOneUse()) 3781 return SDValue(); 3782 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3783 if (!N01C || N01C->getZExtValue() != 0xFF00) 3784 return SDValue(); 3785 N0 = N0.getOperand(0); 3786 LookPassAnd0 = true; 3787 } 3788 3789 if (N1.getOpcode() == ISD::AND) { 3790 if (!N1.getNode()->hasOneUse()) 3791 return SDValue(); 3792 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 3793 if (!N11C || N11C->getZExtValue() != 0xFF) 3794 return SDValue(); 3795 N1 = N1.getOperand(0); 3796 LookPassAnd1 = true; 3797 } 3798 3799 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL) 3800 std::swap(N0, N1); 3801 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL) 3802 return SDValue(); 3803 if (!N0.getNode()->hasOneUse() || !N1.getNode()->hasOneUse()) 3804 return SDValue(); 3805 3806 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3807 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1)); 3808 if (!N01C || !N11C) 3809 return SDValue(); 3810 if (N01C->getZExtValue() != 8 || N11C->getZExtValue() != 8) 3811 return SDValue(); 3812 3813 // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8) 3814 SDValue N00 = N0->getOperand(0); 3815 if (!LookPassAnd0 && N00.getOpcode() == ISD::AND) { 3816 if (!N00.getNode()->hasOneUse()) 3817 return SDValue(); 3818 ConstantSDNode *N001C = 
dyn_cast<ConstantSDNode>(N00.getOperand(1)); 3819 if (!N001C || N001C->getZExtValue() != 0xFF) 3820 return SDValue(); 3821 N00 = N00.getOperand(0); 3822 LookPassAnd0 = true; 3823 } 3824 3825 SDValue N10 = N1->getOperand(0); 3826 if (!LookPassAnd1 && N10.getOpcode() == ISD::AND) { 3827 if (!N10.getNode()->hasOneUse()) 3828 return SDValue(); 3829 ConstantSDNode *N101C = dyn_cast<ConstantSDNode>(N10.getOperand(1)); 3830 if (!N101C || N101C->getZExtValue() != 0xFF00) 3831 return SDValue(); 3832 N10 = N10.getOperand(0); 3833 LookPassAnd1 = true; 3834 } 3835 3836 if (N00 != N10) 3837 return SDValue(); 3838 3839 // Make sure everything beyond the low halfword gets set to zero since the SRL 3840 // 16 will clear the top bits. 3841 unsigned OpSizeInBits = VT.getSizeInBits(); 3842 if (DemandHighBits && OpSizeInBits > 16) { 3843 // If the left-shift isn't masked out then the only way this is a bswap is 3844 // if all bits beyond the low 8 are 0. In that case the entire pattern 3845 // reduces to a left shift anyway: leave it for other parts of the combiner. 3846 if (!LookPassAnd0) 3847 return SDValue(); 3848 3849 // However, if the right shift isn't masked out then it might be because 3850 // it's not needed. See if we can spot that too. 3851 if (!LookPassAnd1 && 3852 !DAG.MaskedValueIsZero( 3853 N10, APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - 16))) 3854 return SDValue(); 3855 } 3856 3857 SDValue Res = DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N00); 3858 if (OpSizeInBits > 16) { 3859 SDLoc DL(N); 3860 Res = DAG.getNode(ISD::SRL, DL, VT, Res, 3861 DAG.getConstant(OpSizeInBits - 16, DL, 3862 getShiftAmountTy(VT))); 3863 } 3864 return Res; 3865 } 3866 3867 /// Return true if the specified node is an element that makes up a 32-bit 3868 /// packed halfword byteswap. 3869 /// ((x & 0x000000ff) << 8) | 3870 /// ((x & 0x0000ff00) >> 8) | 3871 /// ((x & 0x00ff0000) << 8) | 3872 /// ((x & 0xff000000) >> 8) 3873 static bool isBSwapHWordElement(SDValue N, MutableArrayRef<SDNode *> Parts) { 3874 if (!N.getNode()->hasOneUse()) 3875 return false; 3876 3877 unsigned Opc = N.getOpcode(); 3878 if (Opc != ISD::AND && Opc != ISD::SHL && Opc != ISD::SRL) 3879 return false; 3880 3881 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 3882 if (!N1C) 3883 return false; 3884 3885 unsigned Num; 3886 switch (N1C->getZExtValue()) { 3887 default: 3888 return false; 3889 case 0xFF: Num = 0; break; 3890 case 0xFF00: Num = 1; break; 3891 case 0xFF0000: Num = 2; break; 3892 case 0xFF000000: Num = 3; break; 3893 } 3894 3895 // Look for (x & 0xff) << 8 as well as ((x << 8) & 0xff00). 
3896 SDValue N0 = N.getOperand(0); 3897 if (Opc == ISD::AND) { 3898 if (Num == 0 || Num == 2) { 3899 // (x >> 8) & 0xff 3900 // (x >> 8) & 0xff0000 3901 if (N0.getOpcode() != ISD::SRL) 3902 return false; 3903 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3904 if (!C || C->getZExtValue() != 8) 3905 return false; 3906 } else { 3907 // (x << 8) & 0xff00 3908 // (x << 8) & 0xff000000 3909 if (N0.getOpcode() != ISD::SHL) 3910 return false; 3911 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1)); 3912 if (!C || C->getZExtValue() != 8) 3913 return false; 3914 } 3915 } else if (Opc == ISD::SHL) { 3916 // (x & 0xff) << 8 3917 // (x & 0xff0000) << 8 3918 if (Num != 0 && Num != 2) 3919 return false; 3920 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 3921 if (!C || C->getZExtValue() != 8) 3922 return false; 3923 } else { // Opc == ISD::SRL 3924 // (x & 0xff00) >> 8 3925 // (x & 0xff000000) >> 8 3926 if (Num != 1 && Num != 3) 3927 return false; 3928 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N.getOperand(1)); 3929 if (!C || C->getZExtValue() != 8) 3930 return false; 3931 } 3932 3933 if (Parts[Num]) 3934 return false; 3935 3936 Parts[Num] = N0.getOperand(0).getNode(); 3937 return true; 3938 } 3939 3940 /// Match a 32-bit packed halfword bswap. That is 3941 /// ((x & 0x000000ff) << 8) | 3942 /// ((x & 0x0000ff00) >> 8) | 3943 /// ((x & 0x00ff0000) << 8) | 3944 /// ((x & 0xff000000) >> 8) 3945 /// => (rotl (bswap x), 16) 3946 SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) { 3947 if (!LegalOperations) 3948 return SDValue(); 3949 3950 EVT VT = N->getValueType(0); 3951 if (VT != MVT::i32) 3952 return SDValue(); 3953 if (!TLI.isOperationLegal(ISD::BSWAP, VT)) 3954 return SDValue(); 3955 3956 // Look for either 3957 // (or (or (and), (and)), (or (and), (and))) 3958 // (or (or (or (and), (and)), (and)), (and)) 3959 if (N0.getOpcode() != ISD::OR) 3960 return SDValue(); 3961 SDValue N00 = N0.getOperand(0); 3962 SDValue N01 = N0.getOperand(1); 3963 SDNode *Parts[4] = {}; 3964 3965 if (N1.getOpcode() == ISD::OR && 3966 N00.getNumOperands() == 2 && N01.getNumOperands() == 2) { 3967 // (or (or (and), (and)), (or (and), (and))) 3968 SDValue N000 = N00.getOperand(0); 3969 if (!isBSwapHWordElement(N000, Parts)) 3970 return SDValue(); 3971 3972 SDValue N001 = N00.getOperand(1); 3973 if (!isBSwapHWordElement(N001, Parts)) 3974 return SDValue(); 3975 SDValue N010 = N01.getOperand(0); 3976 if (!isBSwapHWordElement(N010, Parts)) 3977 return SDValue(); 3978 SDValue N011 = N01.getOperand(1); 3979 if (!isBSwapHWordElement(N011, Parts)) 3980 return SDValue(); 3981 } else { 3982 // (or (or (or (and), (and)), (and)), (and)) 3983 if (!isBSwapHWordElement(N1, Parts)) 3984 return SDValue(); 3985 if (!isBSwapHWordElement(N01, Parts)) 3986 return SDValue(); 3987 if (N00.getOpcode() != ISD::OR) 3988 return SDValue(); 3989 SDValue N000 = N00.getOperand(0); 3990 if (!isBSwapHWordElement(N000, Parts)) 3991 return SDValue(); 3992 SDValue N001 = N00.getOperand(1); 3993 if (!isBSwapHWordElement(N001, Parts)) 3994 return SDValue(); 3995 } 3996 3997 // Make sure the parts are all coming from the same node. 3998 if (Parts[0] != Parts[1] || Parts[0] != Parts[2] || Parts[0] != Parts[3]) 3999 return SDValue(); 4000 4001 SDLoc DL(N); 4002 SDValue BSwap = DAG.getNode(ISD::BSWAP, DL, VT, 4003 SDValue(Parts[0], 0)); 4004 4005 // Result of the bswap should be rotated by 16. If it's not legal, then 4006 // do (x << 16) | (x >> 16). 
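// Worked example with an arbitrary value: for x = 0xAABBCCDD the matched
// halfword-swap pattern produces 0xBBAADDCC. bswap gives 0xDDCCBBAA, and
// rotating that left by 16 (or, failing that, oring together the 16-bit left
// and right shifts) restores 0xBBAADDCC, which is what the code below emits.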
4007 SDValue ShAmt = DAG.getConstant(16, DL, getShiftAmountTy(VT)); 4008 if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT)) 4009 return DAG.getNode(ISD::ROTL, DL, VT, BSwap, ShAmt); 4010 if (TLI.isOperationLegalOrCustom(ISD::ROTR, VT)) 4011 return DAG.getNode(ISD::ROTR, DL, VT, BSwap, ShAmt); 4012 return DAG.getNode(ISD::OR, DL, VT, 4013 DAG.getNode(ISD::SHL, DL, VT, BSwap, ShAmt), 4014 DAG.getNode(ISD::SRL, DL, VT, BSwap, ShAmt)); 4015 } 4016 4017 /// This contains all DAGCombine rules which reduce two values combined by 4018 /// an Or operation to a single value \see visitANDLike(). 4019 SDValue DAGCombiner::visitORLike(SDValue N0, SDValue N1, SDNode *N) { 4020 EVT VT = N1.getValueType(); 4021 SDLoc DL(N); 4022 4023 // fold (or x, undef) -> -1 4024 if (!LegalOperations && (N0.isUndef() || N1.isUndef())) 4025 return DAG.getAllOnesConstant(DL, VT); 4026 4027 if (SDValue V = foldLogicOfSetCCs(false, N0, N1, DL)) 4028 return V; 4029 4030 // (or (and X, C1), (and Y, C2)) -> (and (or X, Y), C3) if possible. 4031 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == ISD::AND && 4032 // Don't increase # computations. 4033 (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) { 4034 // We can only do this xform if we know that bits from X that are set in C2 4035 // but not in C1 are already zero. Likewise for Y. 4036 if (const ConstantSDNode *N0O1C = 4037 getAsNonOpaqueConstant(N0.getOperand(1))) { 4038 if (const ConstantSDNode *N1O1C = 4039 getAsNonOpaqueConstant(N1.getOperand(1))) { 4040 // We can only do this xform if we know that bits from X that are set in 4041 // C2 but not in C1 are already zero. Likewise for Y. 4042 const APInt &LHSMask = N0O1C->getAPIntValue(); 4043 const APInt &RHSMask = N1O1C->getAPIntValue(); 4044 4045 if (DAG.MaskedValueIsZero(N0.getOperand(0), RHSMask&~LHSMask) && 4046 DAG.MaskedValueIsZero(N1.getOperand(0), LHSMask&~RHSMask)) { 4047 SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT, 4048 N0.getOperand(0), N1.getOperand(0)); 4049 return DAG.getNode(ISD::AND, DL, VT, X, 4050 DAG.getConstant(LHSMask | RHSMask, DL, VT)); 4051 } 4052 } 4053 } 4054 } 4055 4056 // (or (and X, M), (and X, N)) -> (and X, (or M, N)) 4057 if (N0.getOpcode() == ISD::AND && 4058 N1.getOpcode() == ISD::AND && 4059 N0.getOperand(0) == N1.getOperand(0) && 4060 // Don't increase # computations. 
4061 (N0.getNode()->hasOneUse() || N1.getNode()->hasOneUse())) { 4062 SDValue X = DAG.getNode(ISD::OR, SDLoc(N0), VT, 4063 N0.getOperand(1), N1.getOperand(1)); 4064 return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), X); 4065 } 4066 4067 return SDValue(); 4068 } 4069 4070 SDValue DAGCombiner::visitOR(SDNode *N) { 4071 SDValue N0 = N->getOperand(0); 4072 SDValue N1 = N->getOperand(1); 4073 EVT VT = N1.getValueType(); 4074 4075 // x | x --> x 4076 if (N0 == N1) 4077 return N0; 4078 4079 // fold vector ops 4080 if (VT.isVector()) { 4081 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 4082 return FoldedVOp; 4083 4084 // fold (or x, 0) -> x, vector edition 4085 if (ISD::isBuildVectorAllZeros(N0.getNode())) 4086 return N1; 4087 if (ISD::isBuildVectorAllZeros(N1.getNode())) 4088 return N0; 4089 4090 // fold (or x, -1) -> -1, vector edition 4091 if (ISD::isBuildVectorAllOnes(N0.getNode())) 4092 // do not return N0, because undef node may exist in N0 4093 return DAG.getAllOnesConstant(SDLoc(N), N0.getValueType()); 4094 if (ISD::isBuildVectorAllOnes(N1.getNode())) 4095 // do not return N1, because undef node may exist in N1 4096 return DAG.getAllOnesConstant(SDLoc(N), N1.getValueType()); 4097 4098 // fold (or (shuf A, V_0, MA), (shuf B, V_0, MB)) -> (shuf A, B, Mask) 4099 // Do this only if the resulting shuffle is legal. 4100 if (isa<ShuffleVectorSDNode>(N0) && 4101 isa<ShuffleVectorSDNode>(N1) && 4102 // Avoid folding a node with illegal type. 4103 TLI.isTypeLegal(VT)) { 4104 bool ZeroN00 = ISD::isBuildVectorAllZeros(N0.getOperand(0).getNode()); 4105 bool ZeroN01 = ISD::isBuildVectorAllZeros(N0.getOperand(1).getNode()); 4106 bool ZeroN10 = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode()); 4107 bool ZeroN11 = ISD::isBuildVectorAllZeros(N1.getOperand(1).getNode()); 4108 // Ensure both shuffles have a zero input. 4109 if ((ZeroN00 != ZeroN01) && (ZeroN10 != ZeroN11)) { 4110 assert((!ZeroN00 || !ZeroN01) && "Both inputs zero!"); 4111 assert((!ZeroN10 || !ZeroN11) && "Both inputs zero!"); 4112 const ShuffleVectorSDNode *SV0 = cast<ShuffleVectorSDNode>(N0); 4113 const ShuffleVectorSDNode *SV1 = cast<ShuffleVectorSDNode>(N1); 4114 bool CanFold = true; 4115 int NumElts = VT.getVectorNumElements(); 4116 SmallVector<int, 4> Mask(NumElts); 4117 4118 for (int i = 0; i != NumElts; ++i) { 4119 int M0 = SV0->getMaskElt(i); 4120 int M1 = SV1->getMaskElt(i); 4121 4122 // Determine if either index is pointing to a zero vector. 4123 bool M0Zero = M0 < 0 || (ZeroN00 == (M0 < NumElts)); 4124 bool M1Zero = M1 < 0 || (ZeroN10 == (M1 < NumElts)); 4125 4126 // If one element is zero and the otherside is undef, keep undef. 4127 // This also handles the case that both are undef. 4128 if ((M0Zero && M1 < 0) || (M1Zero && M0 < 0)) { 4129 Mask[i] = -1; 4130 continue; 4131 } 4132 4133 // Make sure only one of the elements is zero. 4134 if (M0Zero == M1Zero) { 4135 CanFold = false; 4136 break; 4137 } 4138 4139 assert((M0 >= 0 || M1 >= 0) && "Undef index!"); 4140 4141 // We have a zero and non-zero element. If the non-zero came from 4142 // SV0 make the index a LHS index. If it came from SV1, make it 4143 // a RHS index. We need to mod by NumElts because we don't care 4144 // which operand it came from in the original shuffles. 4145 Mask[i] = M1Zero ? M0 % NumElts : (M1 % NumElts) + NumElts; 4146 } 4147 4148 if (CanFold) { 4149 SDValue NewLHS = ZeroN00 ? N0.getOperand(1) : N0.getOperand(0); 4150 SDValue NewRHS = ZeroN10 ? 
N1.getOperand(1) : N1.getOperand(0); 4151 4152 bool LegalMask = TLI.isShuffleMaskLegal(Mask, VT); 4153 if (!LegalMask) { 4154 std::swap(NewLHS, NewRHS); 4155 ShuffleVectorSDNode::commuteMask(Mask); 4156 LegalMask = TLI.isShuffleMaskLegal(Mask, VT); 4157 } 4158 4159 if (LegalMask) 4160 return DAG.getVectorShuffle(VT, SDLoc(N), NewLHS, NewRHS, Mask); 4161 } 4162 } 4163 } 4164 } 4165 4166 // fold (or c1, c2) -> c1|c2 4167 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 4168 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1); 4169 if (N0C && N1C && !N1C->isOpaque()) 4170 return DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N), VT, N0C, N1C); 4171 // canonicalize constant to RHS 4172 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 4173 !DAG.isConstantIntBuildVectorOrConstantInt(N1)) 4174 return DAG.getNode(ISD::OR, SDLoc(N), VT, N1, N0); 4175 // fold (or x, 0) -> x 4176 if (isNullConstant(N1)) 4177 return N0; 4178 // fold (or x, -1) -> -1 4179 if (isAllOnesConstant(N1)) 4180 return N1; 4181 4182 if (SDValue NewSel = foldBinOpIntoSelect(N)) 4183 return NewSel; 4184 4185 // fold (or x, c) -> c iff (x & ~c) == 0 4186 if (N1C && DAG.MaskedValueIsZero(N0, ~N1C->getAPIntValue())) 4187 return N1; 4188 4189 if (SDValue Combined = visitORLike(N0, N1, N)) 4190 return Combined; 4191 4192 // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16) 4193 if (SDValue BSwap = MatchBSwapHWord(N, N0, N1)) 4194 return BSwap; 4195 if (SDValue BSwap = MatchBSwapHWordLow(N, N0, N1)) 4196 return BSwap; 4197 4198 // reassociate or 4199 if (SDValue ROR = ReassociateOps(ISD::OR, SDLoc(N), N0, N1)) 4200 return ROR; 4201 // Canonicalize (or (and X, c1), c2) -> (and (or X, c2), c1|c2) 4202 // iff (c1 & c2) != 0. 4203 if (N1C && N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() && 4204 isa<ConstantSDNode>(N0.getOperand(1))) { 4205 ConstantSDNode *C1 = cast<ConstantSDNode>(N0.getOperand(1)); 4206 if ((C1->getAPIntValue() & N1C->getAPIntValue()) != 0) { 4207 if (SDValue COR = DAG.FoldConstantArithmetic(ISD::OR, SDLoc(N1), VT, 4208 N1C, C1)) 4209 return DAG.getNode( 4210 ISD::AND, SDLoc(N), VT, 4211 DAG.getNode(ISD::OR, SDLoc(N0), VT, N0.getOperand(0), N1), COR); 4212 return SDValue(); 4213 } 4214 } 4215 // Simplify: (or (op x...), (op y...)) -> (op (or x, y)) 4216 if (N0.getOpcode() == N1.getOpcode()) 4217 if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N)) 4218 return Tmp; 4219 4220 // See if this is some rotate idiom. 4221 if (SDNode *Rot = MatchRotate(N0, N1, SDLoc(N))) 4222 return SDValue(Rot, 0); 4223 4224 if (SDValue Load = MatchLoadCombine(N)) 4225 return Load; 4226 4227 // Simplify the operands using demanded-bits information. 4228 if (!VT.isVector() && 4229 SimplifyDemandedBits(SDValue(N, 0))) 4230 return SDValue(N, 0); 4231 4232 return SDValue(); 4233 } 4234 4235 /// Match "(X shl/srl V1) & V2" where V2 may not be present. 4236 bool DAGCombiner::MatchRotateHalf(SDValue Op, SDValue &Shift, SDValue &Mask) { 4237 if (Op.getOpcode() == ISD::AND) { 4238 if (DAG.isConstantIntBuildVectorOrConstantInt(Op.getOperand(1))) { 4239 Mask = Op.getOperand(1); 4240 Op = Op.getOperand(0); 4241 } else { 4242 return false; 4243 } 4244 } 4245 4246 if (Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) { 4247 Shift = Op; 4248 return true; 4249 } 4250 4251 return false; 4252 } 4253 4254 // Return true if we can prove that, whenever Neg and Pos are both in the 4255 // range [0, EltSize), Neg == (Pos == 0 ? 0 : EltSize - Pos). 
This means that 4256 // for two opposing shifts shift1 and shift2 and a value X with OpBits bits: 4257 // 4258 // (or (shift1 X, Neg), (shift2 X, Pos)) 4259 // 4260 // reduces to a rotate in direction shift2 by Pos or (equivalently) a rotate 4261 // in direction shift1 by Neg. The range [0, EltSize) means that we only need 4262 // to consider shift amounts with defined behavior. 4263 static bool matchRotateSub(SDValue Pos, SDValue Neg, unsigned EltSize) { 4264 // If EltSize is a power of 2 then: 4265 // 4266 // (a) (Pos == 0 ? 0 : EltSize - Pos) == (EltSize - Pos) & (EltSize - 1) 4267 // (b) Neg == Neg & (EltSize - 1) whenever Neg is in [0, EltSize). 4268 // 4269 // So if EltSize is a power of 2 and Neg is (and Neg', EltSize-1), we check 4270 // for the stronger condition: 4271 // 4272 // Neg & (EltSize - 1) == (EltSize - Pos) & (EltSize - 1) [A] 4273 // 4274 // for all Neg and Pos. Since Neg & (EltSize - 1) == Neg' & (EltSize - 1) 4275 // we can just replace Neg with Neg' for the rest of the function. 4276 // 4277 // In other cases we check for the even stronger condition: 4278 // 4279 // Neg == EltSize - Pos [B] 4280 // 4281 // for all Neg and Pos. Note that the (or ...) then invokes undefined 4282 // behavior if Pos == 0 (and consequently Neg == EltSize). 4283 // 4284 // We could actually use [A] whenever EltSize is a power of 2, but the 4285 // only extra cases that it would match are those uninteresting ones 4286 // where Neg and Pos are never in range at the same time. E.g. for 4287 // EltSize == 32, using [A] would allow a Neg of the form (sub 64, Pos) 4288 // as well as (sub 32, Pos), but: 4289 // 4290 // (or (shift1 X, (sub 64, Pos)), (shift2 X, Pos)) 4291 // 4292 // always invokes undefined behavior for 32-bit X. 4293 // 4294 // Below, Mask == EltSize - 1 when using [A] and is all-ones otherwise. 4295 unsigned MaskLoBits = 0; 4296 if (Neg.getOpcode() == ISD::AND && isPowerOf2_64(EltSize)) { 4297 if (ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(1))) { 4298 if (NegC->getAPIntValue() == EltSize - 1) { 4299 Neg = Neg.getOperand(0); 4300 MaskLoBits = Log2_64(EltSize); 4301 } 4302 } 4303 } 4304 4305 // Check whether Neg has the form (sub NegC, NegOp1) for some NegC and NegOp1. 4306 if (Neg.getOpcode() != ISD::SUB) 4307 return false; 4308 ConstantSDNode *NegC = isConstOrConstSplat(Neg.getOperand(0)); 4309 if (!NegC) 4310 return false; 4311 SDValue NegOp1 = Neg.getOperand(1); 4312 4313 // On the RHS of [A], if Pos is Pos' & (EltSize - 1), just replace Pos with 4314 // Pos'. The truncation is redundant for the purpose of the equality. 4315 if (MaskLoBits && Pos.getOpcode() == ISD::AND) 4316 if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) 4317 if (PosC->getAPIntValue() == EltSize - 1) 4318 Pos = Pos.getOperand(0); 4319 4320 // The condition we need is now: 4321 // 4322 // (NegC - NegOp1) & Mask == (EltSize - Pos) & Mask 4323 // 4324 // If NegOp1 == Pos then we need: 4325 // 4326 // EltSize & Mask == NegC & Mask 4327 // 4328 // (because "x & Mask" is a truncation and distributes through subtraction). 4329 APInt Width; 4330 if (Pos == NegOp1) 4331 Width = NegC->getAPIntValue(); 4332 4333 // Check for cases where Pos has the form (add NegOp1, PosC) for some PosC. 
4334 // Then the condition we want to prove becomes: 4335 // 4336 // (NegC - NegOp1) & Mask == (EltSize - (NegOp1 + PosC)) & Mask 4337 // 4338 // which, again because "x & Mask" is a truncation, becomes: 4339 // 4340 // NegC & Mask == (EltSize - PosC) & Mask 4341 // EltSize & Mask == (NegC + PosC) & Mask 4342 else if (Pos.getOpcode() == ISD::ADD && Pos.getOperand(0) == NegOp1) { 4343 if (ConstantSDNode *PosC = isConstOrConstSplat(Pos.getOperand(1))) 4344 Width = PosC->getAPIntValue() + NegC->getAPIntValue(); 4345 else 4346 return false; 4347 } else 4348 return false; 4349 4350 // Now we just need to check that EltSize & Mask == Width & Mask. 4351 if (MaskLoBits) 4352 // EltSize & Mask is 0 since Mask is EltSize - 1. 4353 return Width.getLoBits(MaskLoBits) == 0; 4354 return Width == EltSize; 4355 } 4356 4357 // A subroutine of MatchRotate used once we have found an OR of two opposite 4358 // shifts of Shifted. If Neg == <operand size> - Pos then the OR reduces 4359 // to both (PosOpcode Shifted, Pos) and (NegOpcode Shifted, Neg), with the 4360 // former being preferred if supported. InnerPos and InnerNeg are Pos and 4361 // Neg with outer conversions stripped away. 4362 SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos, 4363 SDValue Neg, SDValue InnerPos, 4364 SDValue InnerNeg, unsigned PosOpcode, 4365 unsigned NegOpcode, const SDLoc &DL) { 4366 // fold (or (shl x, (*ext y)), 4367 // (srl x, (*ext (sub 32, y)))) -> 4368 // (rotl x, y) or (rotr x, (sub 32, y)) 4369 // 4370 // fold (or (shl x, (*ext (sub 32, y))), 4371 // (srl x, (*ext y))) -> 4372 // (rotr x, y) or (rotl x, (sub 32, y)) 4373 EVT VT = Shifted.getValueType(); 4374 if (matchRotateSub(InnerPos, InnerNeg, VT.getScalarSizeInBits())) { 4375 bool HasPos = TLI.isOperationLegalOrCustom(PosOpcode, VT); 4376 return DAG.getNode(HasPos ? PosOpcode : NegOpcode, DL, VT, Shifted, 4377 HasPos ? Pos : Neg).getNode(); 4378 } 4379 4380 return nullptr; 4381 } 4382 4383 // MatchRotate - Handle an 'or' of two operands. If this is one of the many 4384 // idioms for rotate, and if the target supports rotation instructions, generate 4385 // a rot[lr]. 4386 SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) { 4387 // Must be a legal type. Expanded 'n promoted things won't work with rotates. 4388 EVT VT = LHS.getValueType(); 4389 if (!TLI.isTypeLegal(VT)) return nullptr; 4390 4391 // The target must have at least one rotate flavor. 4392 bool HasROTL = TLI.isOperationLegalOrCustom(ISD::ROTL, VT); 4393 bool HasROTR = TLI.isOperationLegalOrCustom(ISD::ROTR, VT); 4394 if (!HasROTL && !HasROTR) return nullptr; 4395 4396 // Match "(X shl/srl V1) & V2" where V2 may not be present. 4397 SDValue LHSShift; // The shift. 4398 SDValue LHSMask; // AND value if any. 4399 if (!MatchRotateHalf(LHS, LHSShift, LHSMask)) 4400 return nullptr; // Not part of a rotate. 4401 4402 SDValue RHSShift; // The shift. 4403 SDValue RHSMask; // AND value if any. 4404 if (!MatchRotateHalf(RHS, RHSShift, RHSMask)) 4405 return nullptr; // Not part of a rotate. 4406 4407 if (LHSShift.getOperand(0) != RHSShift.getOperand(0)) 4408 return nullptr; // Not shifting the same value. 4409 4410 if (LHSShift.getOpcode() == RHSShift.getOpcode()) 4411 return nullptr; // Shifts must disagree. 4412 4413 // Canonicalize shl to left side in a shl/srl pair. 
4414 if (RHSShift.getOpcode() == ISD::SHL) { 4415 std::swap(LHS, RHS); 4416 std::swap(LHSShift, RHSShift); 4417 std::swap(LHSMask, RHSMask); 4418 } 4419 4420 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 4421 SDValue LHSShiftArg = LHSShift.getOperand(0); 4422 SDValue LHSShiftAmt = LHSShift.getOperand(1); 4423 SDValue RHSShiftArg = RHSShift.getOperand(0); 4424 SDValue RHSShiftAmt = RHSShift.getOperand(1); 4425 4426 // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1) 4427 // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2) 4428 if (isConstOrConstSplat(LHSShiftAmt) && isConstOrConstSplat(RHSShiftAmt)) { 4429 uint64_t LShVal = isConstOrConstSplat(LHSShiftAmt)->getZExtValue(); 4430 uint64_t RShVal = isConstOrConstSplat(RHSShiftAmt)->getZExtValue(); 4431 if ((LShVal + RShVal) != EltSizeInBits) 4432 return nullptr; 4433 4434 SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT, 4435 LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt); 4436 4437 // If there is an AND of either shifted operand, apply it to the result. 4438 if (LHSMask.getNode() || RHSMask.getNode()) { 4439 SDValue Mask = DAG.getAllOnesConstant(DL, VT); 4440 4441 if (LHSMask.getNode()) { 4442 APInt RHSBits = APInt::getLowBitsSet(EltSizeInBits, LShVal); 4443 Mask = DAG.getNode(ISD::AND, DL, VT, Mask, 4444 DAG.getNode(ISD::OR, DL, VT, LHSMask, 4445 DAG.getConstant(RHSBits, DL, VT))); 4446 } 4447 if (RHSMask.getNode()) { 4448 APInt LHSBits = APInt::getHighBitsSet(EltSizeInBits, RShVal); 4449 Mask = DAG.getNode(ISD::AND, DL, VT, Mask, 4450 DAG.getNode(ISD::OR, DL, VT, RHSMask, 4451 DAG.getConstant(LHSBits, DL, VT))); 4452 } 4453 4454 Rot = DAG.getNode(ISD::AND, DL, VT, Rot, Mask); 4455 } 4456 4457 return Rot.getNode(); 4458 } 4459 4460 // If there is a mask here, and we have a variable shift, we can't be sure 4461 // that we're masking out the right stuff. 4462 if (LHSMask.getNode() || RHSMask.getNode()) 4463 return nullptr; 4464 4465 // If the shift amount is sign/zext/any-extended just peel it off. 4466 SDValue LExtOp0 = LHSShiftAmt; 4467 SDValue RExtOp0 = RHSShiftAmt; 4468 if ((LHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || 4469 LHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || 4470 LHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || 4471 LHSShiftAmt.getOpcode() == ISD::TRUNCATE) && 4472 (RHSShiftAmt.getOpcode() == ISD::SIGN_EXTEND || 4473 RHSShiftAmt.getOpcode() == ISD::ZERO_EXTEND || 4474 RHSShiftAmt.getOpcode() == ISD::ANY_EXTEND || 4475 RHSShiftAmt.getOpcode() == ISD::TRUNCATE)) { 4476 LExtOp0 = LHSShiftAmt.getOperand(0); 4477 RExtOp0 = RHSShiftAmt.getOperand(0); 4478 } 4479 4480 SDNode *TryL = MatchRotatePosNeg(LHSShiftArg, LHSShiftAmt, RHSShiftAmt, 4481 LExtOp0, RExtOp0, ISD::ROTL, ISD::ROTR, DL); 4482 if (TryL) 4483 return TryL; 4484 4485 SDNode *TryR = MatchRotatePosNeg(RHSShiftArg, RHSShiftAmt, LHSShiftAmt, 4486 RExtOp0, LExtOp0, ISD::ROTR, ISD::ROTL, DL); 4487 if (TryR) 4488 return TryR; 4489 4490 return nullptr; 4491 } 4492 4493 namespace { 4494 /// Helper struct to parse and store a memory address as base + index + offset. 4495 /// We ignore sign extensions when it is safe to do so. 4496 /// The following two expressions are not equivalent. To differentiate we need 4497 /// to store whether there was a sign extension involved in the index 4498 /// computation. 
4499 /// (load (i64 add (i64 copyfromreg %c) 4500 /// (i64 signextend (add (i8 load %index) 4501 /// (i8 1)))) 4502 /// vs 4503 /// 4504 /// (load (i64 add (i64 copyfromreg %c) 4505 /// (i64 signextend (i32 add (i32 signextend (i8 load %index)) 4506 /// (i32 1))))) 4507 struct BaseIndexOffset { 4508 SDValue Base; 4509 SDValue Index; 4510 int64_t Offset; 4511 bool IsIndexSignExt; 4512 4513 BaseIndexOffset() : Offset(0), IsIndexSignExt(false) {} 4514 4515 BaseIndexOffset(SDValue Base, SDValue Index, int64_t Offset, 4516 bool IsIndexSignExt) : 4517 Base(Base), Index(Index), Offset(Offset), IsIndexSignExt(IsIndexSignExt) {} 4518 4519 bool equalBaseIndex(const BaseIndexOffset &Other) { 4520 return Other.Base == Base && Other.Index == Index && 4521 Other.IsIndexSignExt == IsIndexSignExt; 4522 } 4523 4524 /// Parses tree in Ptr for base, index, offset addresses. 4525 static BaseIndexOffset match(SDValue Ptr, SelectionDAG &DAG, 4526 int64_t PartialOffset = 0) { 4527 bool IsIndexSignExt = false; 4528 4529 // Split up a folded GlobalAddress+Offset into its component parts. 4530 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Ptr)) 4531 if (GA->getOpcode() == ISD::GlobalAddress && GA->getOffset() != 0) { 4532 return BaseIndexOffset(DAG.getGlobalAddress(GA->getGlobal(), 4533 SDLoc(GA), 4534 GA->getValueType(0), 4535 /*Offset=*/PartialOffset, 4536 /*isTargetGA=*/false, 4537 GA->getTargetFlags()), 4538 SDValue(), 4539 GA->getOffset(), 4540 IsIndexSignExt); 4541 } 4542 4543 // We only can pattern match BASE + INDEX + OFFSET. If Ptr is not an ADD 4544 // instruction, then it could be just the BASE or everything else we don't 4545 // know how to handle. Just use Ptr as BASE and give up. 4546 if (Ptr->getOpcode() != ISD::ADD) 4547 return BaseIndexOffset(Ptr, SDValue(), PartialOffset, IsIndexSignExt); 4548 4549 // We know that we have at least an ADD instruction. Try to pattern match 4550 // the simple case of BASE + OFFSET. 4551 if (isa<ConstantSDNode>(Ptr->getOperand(1))) { 4552 int64_t Offset = cast<ConstantSDNode>(Ptr->getOperand(1))->getSExtValue(); 4553 return match(Ptr->getOperand(0), DAG, Offset + PartialOffset); 4554 } 4555 4556 // Inside a loop the current BASE pointer is calculated using an ADD and a 4557 // MUL instruction. In this case Ptr is the actual BASE pointer. 4558 // (i64 add (i64 %array_ptr) 4559 // (i64 mul (i64 %induction_var) 4560 // (i64 %element_size))) 4561 if (Ptr->getOperand(1)->getOpcode() == ISD::MUL) 4562 return BaseIndexOffset(Ptr, SDValue(), PartialOffset, IsIndexSignExt); 4563 4564 // Look at Base + Index + Offset cases. 4565 SDValue Base = Ptr->getOperand(0); 4566 SDValue IndexOffset = Ptr->getOperand(1); 4567 4568 // Skip signextends. 4569 if (IndexOffset->getOpcode() == ISD::SIGN_EXTEND) { 4570 IndexOffset = IndexOffset->getOperand(0); 4571 IsIndexSignExt = true; 4572 } 4573 4574 // Either the case of Base + Index (no offset) or something else. 4575 if (IndexOffset->getOpcode() != ISD::ADD) 4576 return BaseIndexOffset(Base, IndexOffset, PartialOffset, IsIndexSignExt); 4577 4578 // Now we have the case of Base + Index + offset. 4579 SDValue Index = IndexOffset->getOperand(0); 4580 SDValue Offset = IndexOffset->getOperand(1); 4581 4582 if (!isa<ConstantSDNode>(Offset)) 4583 return BaseIndexOffset(Ptr, SDValue(), PartialOffset, IsIndexSignExt); 4584 4585 // Ignore signextends. 
4586 if (Index->getOpcode() == ISD::SIGN_EXTEND) { 4587 Index = Index->getOperand(0); 4588 IsIndexSignExt = true; 4589 } else IsIndexSignExt = false; 4590 4591 int64_t Off = cast<ConstantSDNode>(Offset)->getSExtValue(); 4592 return BaseIndexOffset(Base, Index, Off + PartialOffset, IsIndexSignExt); 4593 } 4594 }; 4595 } // namespace 4596 4597 namespace { 4598 /// Represents known origin of an individual byte in load combine pattern. The 4599 /// value of the byte is either constant zero or comes from memory. 4600 struct ByteProvider { 4601 // For constant zero providers Load is set to nullptr. For memory providers 4602 // Load represents the node which loads the byte from memory. 4603 // ByteOffset is the offset of the byte in the value produced by the load. 4604 LoadSDNode *Load; 4605 unsigned ByteOffset; 4606 4607 ByteProvider() : Load(nullptr), ByteOffset(0) {} 4608 4609 static ByteProvider getMemory(LoadSDNode *Load, unsigned ByteOffset) { 4610 return ByteProvider(Load, ByteOffset); 4611 } 4612 static ByteProvider getConstantZero() { return ByteProvider(nullptr, 0); } 4613 4614 bool isConstantZero() const { return !Load; } 4615 bool isMemory() const { return Load; } 4616 4617 bool operator==(const ByteProvider &Other) const { 4618 return Other.Load == Load && Other.ByteOffset == ByteOffset; 4619 } 4620 4621 private: 4622 ByteProvider(LoadSDNode *Load, unsigned ByteOffset) 4623 : Load(Load), ByteOffset(ByteOffset) {} 4624 }; 4625 4626 /// Recursively traverses the expression calculating the origin of the requested 4627 /// byte of the given value. Returns None if the provider can't be calculated. 4628 /// 4629 /// For all the values except the root of the expression verifies that the value 4630 /// has exactly one use and if it's not true return None. This way if the origin 4631 /// of the byte is returned it's guaranteed that the values which contribute to 4632 /// the byte are not used outside of this expression. 4633 /// 4634 /// Because the parts of the expression are not allowed to have more than one 4635 /// use this function iterates over trees, not DAGs. So it never visits the same 4636 /// node more than once. 4637 const Optional<ByteProvider> calculateByteProvider(SDValue Op, unsigned Index, 4638 unsigned Depth, 4639 bool Root = false) { 4640 // Typical i64 by i8 pattern requires recursion up to 8 calls depth 4641 if (Depth == 10) 4642 return None; 4643 4644 if (!Root && !Op.hasOneUse()) 4645 return None; 4646 4647 assert(Op.getValueType().isScalarInteger() && "can't handle other types"); 4648 unsigned BitWidth = Op.getValueSizeInBits(); 4649 if (BitWidth % 8 != 0) 4650 return None; 4651 unsigned ByteWidth = BitWidth / 8; 4652 assert(Index < ByteWidth && "invalid index requested"); 4653 (void) ByteWidth; 4654 4655 switch (Op.getOpcode()) { 4656 case ISD::OR: { 4657 auto LHS = calculateByteProvider(Op->getOperand(0), Index, Depth + 1); 4658 if (!LHS) 4659 return None; 4660 auto RHS = calculateByteProvider(Op->getOperand(1), Index, Depth + 1); 4661 if (!RHS) 4662 return None; 4663 4664 if (LHS->isConstantZero()) 4665 return RHS; 4666 if (RHS->isConstantZero()) 4667 return LHS; 4668 return None; 4669 } 4670 case ISD::SHL: { 4671 auto ShiftOp = dyn_cast<ConstantSDNode>(Op->getOperand(1)); 4672 if (!ShiftOp) 4673 return None; 4674 4675 uint64_t BitShift = ShiftOp->getZExtValue(); 4676 if (BitShift % 8 != 0) 4677 return None; 4678 uint64_t ByteShift = BitShift / 8; 4679 4680 return Index < ByteShift 4681 ? 
                   ByteProvider::getConstantZero()
               : calculateByteProvider(Op->getOperand(0), Index - ByteShift,
                                       Depth + 1);
  }
  case ISD::ANY_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND: {
    SDValue NarrowOp = Op->getOperand(0);
    unsigned NarrowBitWidth = NarrowOp.getScalarValueSizeInBits();
    if (NarrowBitWidth % 8 != 0)
      return None;
    uint64_t NarrowByteWidth = NarrowBitWidth / 8;

    if (Index >= NarrowByteWidth)
      return Op.getOpcode() == ISD::ZERO_EXTEND
                 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
                 : None;
    return calculateByteProvider(NarrowOp, Index, Depth + 1);
  }
  case ISD::BSWAP:
    return calculateByteProvider(Op->getOperand(0), ByteWidth - Index - 1,
                                 Depth + 1);
  case ISD::LOAD: {
    auto L = cast<LoadSDNode>(Op.getNode());
    if (L->isVolatile() || L->isIndexed())
      return None;

    unsigned NarrowBitWidth = L->getMemoryVT().getSizeInBits();
    if (NarrowBitWidth % 8 != 0)
      return None;
    uint64_t NarrowByteWidth = NarrowBitWidth / 8;

    if (Index >= NarrowByteWidth)
      return L->getExtensionType() == ISD::ZEXTLOAD
                 ? Optional<ByteProvider>(ByteProvider::getConstantZero())
                 : None;
    return ByteProvider::getMemory(L, Index);
  }
  }

  return None;
}
} // namespace

/// Match a pattern where a wide type scalar value is loaded by several narrow
/// loads and combined by shifts and ors. Fold it into a single load or a load
/// and a BSWAP if the target supports it.
///
/// Assuming a little endian target:
///  i8 *a = ...
///  i32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
/// =>
///  i32 val = *((i32)a)
///
///  i8 *a = ...
///  i32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
/// =>
///  i32 val = BSWAP(*((i32)a))
///
/// TODO: This rule matches complex patterns with OR node roots and doesn't
/// interact well with the worklist mechanism. When a part of the pattern is
/// updated (e.g. one of the loads), its direct users are put into the
/// worklist, but the root node of the pattern which triggers the load combine
/// is not necessarily a direct user of the changed node. For example, once the
/// address of the t28 load below is reassociated, the load combine won't be
/// triggered:
///             t25: i32 = add t4, Constant:i32<2>
///           t26: i64 = sign_extend t25
///         t27: i64 = add t2, t26
///       t28: i8,ch = load<LD1[%tmp9]> t0, t27, undef:i64
///     t29: i32 = zero_extend t28
///   t32: i32 = shl t29, Constant:i8<8>
/// t33: i32 = or t23, t32
/// As a possible fix, visitLoad can check whether the load can be part of a
/// load combine pattern and add the corresponding OR roots to the worklist.
SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
  assert(N->getOpcode() == ISD::OR &&
         "Can only match load combining against OR nodes");

  // Handles simple types only.
  EVT VT = N->getValueType(0);
  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
    return SDValue();
  unsigned ByteWidth = VT.getSizeInBits() / 8;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  // Before legalization we can introduce illegally wide loads, which will
  // later be split into legal-sized loads. This lets us combine i64-load-by-i8
  // patterns into a pair of i32 loads on 32-bit targets.
4769 if (LegalOperations && !TLI.isOperationLegal(ISD::LOAD, VT)) 4770 return SDValue(); 4771 4772 std::function<unsigned(unsigned, unsigned)> LittleEndianByteAt = []( 4773 unsigned BW, unsigned i) { return i; }; 4774 std::function<unsigned(unsigned, unsigned)> BigEndianByteAt = []( 4775 unsigned BW, unsigned i) { return BW - i - 1; }; 4776 4777 bool IsBigEndianTarget = DAG.getDataLayout().isBigEndian(); 4778 auto MemoryByteOffset = [&] (ByteProvider P) { 4779 assert(P.isMemory() && "Must be a memory byte provider"); 4780 unsigned LoadBitWidth = P.Load->getMemoryVT().getSizeInBits(); 4781 assert(LoadBitWidth % 8 == 0 && 4782 "can only analyze providers for individual bytes not bit"); 4783 unsigned LoadByteWidth = LoadBitWidth / 8; 4784 return IsBigEndianTarget 4785 ? BigEndianByteAt(LoadByteWidth, P.ByteOffset) 4786 : LittleEndianByteAt(LoadByteWidth, P.ByteOffset); 4787 }; 4788 4789 Optional<BaseIndexOffset> Base; 4790 SDValue Chain; 4791 4792 SmallSet<LoadSDNode *, 8> Loads; 4793 Optional<ByteProvider> FirstByteProvider; 4794 int64_t FirstOffset = INT64_MAX; 4795 4796 // Check if all the bytes of the OR we are looking at are loaded from the same 4797 // base address. Collect bytes offsets from Base address in ByteOffsets. 4798 SmallVector<int64_t, 4> ByteOffsets(ByteWidth); 4799 for (unsigned i = 0; i < ByteWidth; i++) { 4800 auto P = calculateByteProvider(SDValue(N, 0), i, 0, /*Root=*/true); 4801 if (!P || !P->isMemory()) // All the bytes must be loaded from memory 4802 return SDValue(); 4803 4804 LoadSDNode *L = P->Load; 4805 assert(L->hasNUsesOfValue(1, 0) && !L->isVolatile() && !L->isIndexed() && 4806 "Must be enforced by calculateByteProvider"); 4807 assert(L->getOffset().isUndef() && "Unindexed load must have undef offset"); 4808 4809 // All loads must share the same chain 4810 SDValue LChain = L->getChain(); 4811 if (!Chain) 4812 Chain = LChain; 4813 else if (Chain != LChain) 4814 return SDValue(); 4815 4816 // Loads must share the same base address 4817 BaseIndexOffset Ptr = BaseIndexOffset::match(L->getBasePtr(), DAG); 4818 if (!Base) 4819 Base = Ptr; 4820 else if (!Base->equalBaseIndex(Ptr)) 4821 return SDValue(); 4822 4823 // Calculate the offset of the current byte from the base address 4824 int64_t ByteOffsetFromBase = Ptr.Offset + MemoryByteOffset(*P); 4825 ByteOffsets[i] = ByteOffsetFromBase; 4826 4827 // Remember the first byte load 4828 if (ByteOffsetFromBase < FirstOffset) { 4829 FirstByteProvider = P; 4830 FirstOffset = ByteOffsetFromBase; 4831 } 4832 4833 Loads.insert(L); 4834 } 4835 assert(Loads.size() > 0 && "All the bytes of the value must be loaded from " 4836 "memory, so there must be at least one load which produces the value"); 4837 assert(Base && "Base address of the accessed memory location must be set"); 4838 assert(FirstOffset != INT64_MAX && "First byte offset must be set"); 4839 4840 // Check if the bytes of the OR we are looking at match with either big or 4841 // little endian value load 4842 bool BigEndian = true, LittleEndian = true; 4843 for (unsigned i = 0; i < ByteWidth; i++) { 4844 int64_t CurrentByteOffset = ByteOffsets[i] - FirstOffset; 4845 LittleEndian &= CurrentByteOffset == LittleEndianByteAt(ByteWidth, i); 4846 BigEndian &= CurrentByteOffset == BigEndianByteAt(ByteWidth, i); 4847 if (!BigEndian && !LittleEndian) 4848 return SDValue(); 4849 } 4850 assert((BigEndian != LittleEndian) && "should be either or"); 4851 assert(FirstByteProvider && "must be set"); 4852 4853 // Ensure that the first byte is loaded from zero offset of the first load. 
4854 // So the combined value can be loaded from the first load address. 4855 if (MemoryByteOffset(*FirstByteProvider) != 0) 4856 return SDValue(); 4857 LoadSDNode *FirstLoad = FirstByteProvider->Load; 4858 4859 // The node we are looking at matches with the pattern, check if we can 4860 // replace it with a single load and bswap if needed. 4861 4862 // If the load needs byte swap check if the target supports it 4863 bool NeedsBswap = IsBigEndianTarget != BigEndian; 4864 4865 // Before legalize we can introduce illegal bswaps which will be later 4866 // converted to an explicit bswap sequence. This way we end up with a single 4867 // load and byte shuffling instead of several loads and byte shuffling. 4868 if (NeedsBswap && LegalOperations && !TLI.isOperationLegal(ISD::BSWAP, VT)) 4869 return SDValue(); 4870 4871 // Check that a load of the wide type is both allowed and fast on the target 4872 bool Fast = false; 4873 bool Allowed = TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), 4874 VT, FirstLoad->getAddressSpace(), 4875 FirstLoad->getAlignment(), &Fast); 4876 if (!Allowed || !Fast) 4877 return SDValue(); 4878 4879 SDValue NewLoad = 4880 DAG.getLoad(VT, SDLoc(N), Chain, FirstLoad->getBasePtr(), 4881 FirstLoad->getPointerInfo(), FirstLoad->getAlignment()); 4882 4883 // Transfer chain users from old loads to the new load. 4884 for (LoadSDNode *L : Loads) 4885 DAG.ReplaceAllUsesOfValueWith(SDValue(L, 1), SDValue(NewLoad.getNode(), 1)); 4886 4887 return NeedsBswap ? DAG.getNode(ISD::BSWAP, SDLoc(N), VT, NewLoad) : NewLoad; 4888 } 4889 4890 SDValue DAGCombiner::visitXOR(SDNode *N) { 4891 SDValue N0 = N->getOperand(0); 4892 SDValue N1 = N->getOperand(1); 4893 EVT VT = N0.getValueType(); 4894 4895 // fold vector ops 4896 if (VT.isVector()) { 4897 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 4898 return FoldedVOp; 4899 4900 // fold (xor x, 0) -> x, vector edition 4901 if (ISD::isBuildVectorAllZeros(N0.getNode())) 4902 return N1; 4903 if (ISD::isBuildVectorAllZeros(N1.getNode())) 4904 return N0; 4905 } 4906 4907 // fold (xor undef, undef) -> 0. This is a common idiom (misuse). 
4908 if (N0.isUndef() && N1.isUndef()) 4909 return DAG.getConstant(0, SDLoc(N), VT); 4910 // fold (xor x, undef) -> undef 4911 if (N0.isUndef()) 4912 return N0; 4913 if (N1.isUndef()) 4914 return N1; 4915 // fold (xor c1, c2) -> c1^c2 4916 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 4917 ConstantSDNode *N1C = getAsNonOpaqueConstant(N1); 4918 if (N0C && N1C) 4919 return DAG.FoldConstantArithmetic(ISD::XOR, SDLoc(N), VT, N0C, N1C); 4920 // canonicalize constant to RHS 4921 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 4922 !DAG.isConstantIntBuildVectorOrConstantInt(N1)) 4923 return DAG.getNode(ISD::XOR, SDLoc(N), VT, N1, N0); 4924 // fold (xor x, 0) -> x 4925 if (isNullConstant(N1)) 4926 return N0; 4927 4928 if (SDValue NewSel = foldBinOpIntoSelect(N)) 4929 return NewSel; 4930 4931 // reassociate xor 4932 if (SDValue RXOR = ReassociateOps(ISD::XOR, SDLoc(N), N0, N1)) 4933 return RXOR; 4934 4935 // fold !(x cc y) -> (x !cc y) 4936 SDValue LHS, RHS, CC; 4937 if (TLI.isConstTrueVal(N1.getNode()) && isSetCCEquivalent(N0, LHS, RHS, CC)) { 4938 bool isInt = LHS.getValueType().isInteger(); 4939 ISD::CondCode NotCC = ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), 4940 isInt); 4941 4942 if (!LegalOperations || 4943 TLI.isCondCodeLegal(NotCC, LHS.getSimpleValueType())) { 4944 switch (N0.getOpcode()) { 4945 default: 4946 llvm_unreachable("Unhandled SetCC Equivalent!"); 4947 case ISD::SETCC: 4948 return DAG.getSetCC(SDLoc(N0), VT, LHS, RHS, NotCC); 4949 case ISD::SELECT_CC: 4950 return DAG.getSelectCC(SDLoc(N0), LHS, RHS, N0.getOperand(2), 4951 N0.getOperand(3), NotCC); 4952 } 4953 } 4954 } 4955 4956 // fold (not (zext (setcc x, y))) -> (zext (not (setcc x, y))) 4957 if (isOneConstant(N1) && N0.getOpcode() == ISD::ZERO_EXTEND && 4958 N0.getNode()->hasOneUse() && 4959 isSetCCEquivalent(N0.getOperand(0), LHS, RHS, CC)){ 4960 SDValue V = N0.getOperand(0); 4961 SDLoc DL(N0); 4962 V = DAG.getNode(ISD::XOR, DL, V.getValueType(), V, 4963 DAG.getConstant(1, DL, V.getValueType())); 4964 AddToWorklist(V.getNode()); 4965 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, V); 4966 } 4967 4968 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are setcc 4969 if (isOneConstant(N1) && VT == MVT::i1 && 4970 (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) { 4971 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); 4972 if (isOneUseSetCC(RHS) || isOneUseSetCC(LHS)) { 4973 unsigned NewOpcode = N0.getOpcode() == ISD::AND ? ISD::OR : ISD::AND; 4974 LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS 4975 RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS 4976 AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode()); 4977 return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS); 4978 } 4979 } 4980 // fold (not (or x, y)) -> (and (not x), (not y)) iff x or y are constants 4981 if (isAllOnesConstant(N1) && 4982 (N0.getOpcode() == ISD::OR || N0.getOpcode() == ISD::AND)) { 4983 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); 4984 if (isa<ConstantSDNode>(RHS) || isa<ConstantSDNode>(LHS)) { 4985 unsigned NewOpcode = N0.getOpcode() == ISD::AND ? 
ISD::OR : ISD::AND; 4986 LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), VT, LHS, N1); // LHS = ~LHS 4987 RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), VT, RHS, N1); // RHS = ~RHS 4988 AddToWorklist(LHS.getNode()); AddToWorklist(RHS.getNode()); 4989 return DAG.getNode(NewOpcode, SDLoc(N), VT, LHS, RHS); 4990 } 4991 } 4992 // fold (xor (and x, y), y) -> (and (not x), y) 4993 if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() && 4994 N0->getOperand(1) == N1) { 4995 SDValue X = N0->getOperand(0); 4996 SDValue NotX = DAG.getNOT(SDLoc(X), X, VT); 4997 AddToWorklist(NotX.getNode()); 4998 return DAG.getNode(ISD::AND, SDLoc(N), VT, NotX, N1); 4999 } 5000 // fold (xor (xor x, c1), c2) -> (xor x, (xor c1, c2)) 5001 if (N1C && N0.getOpcode() == ISD::XOR) { 5002 if (const ConstantSDNode *N00C = getAsNonOpaqueConstant(N0.getOperand(0))) { 5003 SDLoc DL(N); 5004 return DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(1), 5005 DAG.getConstant(N1C->getAPIntValue() ^ 5006 N00C->getAPIntValue(), DL, VT)); 5007 } 5008 if (const ConstantSDNode *N01C = getAsNonOpaqueConstant(N0.getOperand(1))) { 5009 SDLoc DL(N); 5010 return DAG.getNode(ISD::XOR, DL, VT, N0.getOperand(0), 5011 DAG.getConstant(N1C->getAPIntValue() ^ 5012 N01C->getAPIntValue(), DL, VT)); 5013 } 5014 } 5015 5016 // fold Y = sra (X, size(X)-1); xor (add (X, Y), Y) -> (abs X) 5017 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 5018 if (N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1 && 5019 N1.getOpcode() == ISD::SRA && N1.getOperand(0) == N0.getOperand(0) && 5020 TLI.isOperationLegalOrCustom(ISD::ABS, VT)) { 5021 if (ConstantSDNode *C = isConstOrConstSplat(N1.getOperand(1))) 5022 if (C->getAPIntValue() == (OpSizeInBits - 1)) 5023 return DAG.getNode(ISD::ABS, SDLoc(N), VT, N0.getOperand(0)); 5024 } 5025 5026 // fold (xor x, x) -> 0 5027 if (N0 == N1) 5028 return tryFoldToZero(SDLoc(N), TLI, VT, DAG, LegalOperations, LegalTypes); 5029 5030 // fold (xor (shl 1, x), -1) -> (rotl ~1, x) 5031 // Here is a concrete example of this equivalence: 5032 // i16 x == 14 5033 // i16 shl == 1 << 14 == 16384 == 0b0100000000000000 5034 // i16 xor == ~(1 << 14) == 49151 == 0b1011111111111111 5035 // 5036 // => 5037 // 5038 // i16 ~1 == 0b1111111111111110 5039 // i16 rol(~1, 14) == 0b1011111111111111 5040 // 5041 // Some additional tips to help conceptualize this transform: 5042 // - Try to see the operation as placing a single zero in a value of all ones. 5043 // - There exists no value for x which would allow the result to contain zero. 5044 // - Values of x larger than the bitwidth are undefined and do not require a 5045 // consistent result. 5046 // - Pushing the zero left requires shifting one bits in from the right. 5047 // A rotate left of ~1 is a nice way of achieving the desired result. 5048 if (TLI.isOperationLegalOrCustom(ISD::ROTL, VT) && N0.getOpcode() == ISD::SHL 5049 && isAllOnesConstant(N1) && isOneConstant(N0.getOperand(0))) { 5050 SDLoc DL(N); 5051 return DAG.getNode(ISD::ROTL, DL, VT, DAG.getConstant(~1, DL, VT), 5052 N0.getOperand(1)); 5053 } 5054 5055 // Simplify: xor (op x...), (op y...) -> (op (xor x, y)) 5056 if (N0.getOpcode() == N1.getOpcode()) 5057 if (SDValue Tmp = SimplifyBinOpWithSameOpcodeHands(N)) 5058 return Tmp; 5059 5060 // Simplify the expression using non-local knowledge. 5061 if (!VT.isVector() && 5062 SimplifyDemandedBits(SDValue(N, 0))) 5063 return SDValue(N, 0); 5064 5065 return SDValue(); 5066 } 5067 5068 /// Handle transforms common to the three shifts, when the shift amount is a 5069 /// constant. 
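/// For example, assuming an i32 value and a target whose
/// isDesirableToCommuteWithShift() hook allows it, a pattern such as
///   (shl (add x, C1), C2)
/// can be rewritten as
///   (add (shl x, C2), C1 << C2)
/// so that the binop is applied to the already-shifted value; the same idea
/// applies to and/or/xor combined with srl/sra, subject to the guards below.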
SDValue DAGCombiner::visitShiftByConstant(SDNode *N, ConstantSDNode *Amt) {
  SDNode *LHS = N->getOperand(0).getNode();
  if (!LHS->hasOneUse()) return SDValue();

  // We want to pull some binops through shifts, so that we have (and (shift))
  // instead of (shift (and)), likewise for add, or, xor, etc. This sort of
  // thing happens with address calculations, so it's important to canonicalize
  // it.
  bool HighBitSet = false;  // Can we transform this if the high bit is set?

  switch (LHS->getOpcode()) {
  default: return SDValue();
  case ISD::OR:
  case ISD::XOR:
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  case ISD::AND:
    HighBitSet = true;  // We can only transform sra if the high bit is set.
    break;
  case ISD::ADD:
    if (N->getOpcode() != ISD::SHL)
      return SDValue(); // only shl(add) not sr[al](add).
    HighBitSet = false; // We can only transform sra if the high bit is clear.
    break;
  }

  // We require the RHS of the binop to be a constant and not opaque as well.
  ConstantSDNode *BinOpCst = getAsNonOpaqueConstant(LHS->getOperand(1));
  if (!BinOpCst) return SDValue();

  // FIXME: disable this unless the input to the binop is a shift by a constant
  // or is a copy/select. Enable this in other cases once we figure out when it
  // is actually profitable.
  SDNode *BinOpLHSVal = LHS->getOperand(0).getNode();
  bool isShift = BinOpLHSVal->getOpcode() == ISD::SHL ||
                 BinOpLHSVal->getOpcode() == ISD::SRA ||
                 BinOpLHSVal->getOpcode() == ISD::SRL;
  bool isCopyOrSelect = BinOpLHSVal->getOpcode() == ISD::CopyFromReg ||
                        BinOpLHSVal->getOpcode() == ISD::SELECT;

  if ((!isShift || !isa<ConstantSDNode>(BinOpLHSVal->getOperand(1))) &&
      !isCopyOrSelect)
    return SDValue();

  if (isCopyOrSelect && N->hasOneUse())
    return SDValue();

  EVT VT = N->getValueType(0);

  // If this is a signed shift right, and the high bit is modified by the
  // logical operation, do not perform the transformation. The HighBitSet
  // boolean indicates the value of the high bit of the constant which would
  // cause it to be modified for this operation.
  if (N->getOpcode() == ISD::SRA) {
    bool BinOpRHSSignSet = BinOpCst->getAPIntValue().isNegative();
    if (BinOpRHSSignSet != HighBitSet)
      return SDValue();
  }

  if (!TLI.isDesirableToCommuteWithShift(LHS))
    return SDValue();

  // Fold the constants, shifting the binop RHS by the shift amount.
  SDValue NewRHS = DAG.getNode(N->getOpcode(), SDLoc(LHS->getOperand(1)),
                               N->getValueType(0),
                               LHS->getOperand(1), N->getOperand(1));
  assert(isa<ConstantSDNode>(NewRHS) && "Folding was not successful!");

  // Create the new shift.
  SDValue NewShift = DAG.getNode(N->getOpcode(),
                                 SDLoc(LHS->getOperand(0)),
                                 VT, LHS->getOperand(0), N->getOperand(1));

  // Create the new binop.
5143 return DAG.getNode(LHS->getOpcode(), SDLoc(N), VT, NewShift, NewRHS); 5144 } 5145 5146 SDValue DAGCombiner::distributeTruncateThroughAnd(SDNode *N) { 5147 assert(N->getOpcode() == ISD::TRUNCATE); 5148 assert(N->getOperand(0).getOpcode() == ISD::AND); 5149 5150 // (truncate:TruncVT (and N00, N01C)) -> (and (truncate:TruncVT N00), TruncC) 5151 if (N->hasOneUse() && N->getOperand(0).hasOneUse()) { 5152 SDValue N01 = N->getOperand(0).getOperand(1); 5153 if (isConstantOrConstantVector(N01, /* NoOpaques */ true)) { 5154 SDLoc DL(N); 5155 EVT TruncVT = N->getValueType(0); 5156 SDValue N00 = N->getOperand(0).getOperand(0); 5157 SDValue Trunc00 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N00); 5158 SDValue Trunc01 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, N01); 5159 AddToWorklist(Trunc00.getNode()); 5160 AddToWorklist(Trunc01.getNode()); 5161 return DAG.getNode(ISD::AND, DL, TruncVT, Trunc00, Trunc01); 5162 } 5163 } 5164 5165 return SDValue(); 5166 } 5167 5168 SDValue DAGCombiner::visitRotate(SDNode *N) { 5169 // fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))). 5170 if (N->getOperand(1).getOpcode() == ISD::TRUNCATE && 5171 N->getOperand(1).getOperand(0).getOpcode() == ISD::AND) { 5172 if (SDValue NewOp1 = 5173 distributeTruncateThroughAnd(N->getOperand(1).getNode())) 5174 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), 5175 N->getOperand(0), NewOp1); 5176 } 5177 return SDValue(); 5178 } 5179 5180 SDValue DAGCombiner::visitSHL(SDNode *N) { 5181 SDValue N0 = N->getOperand(0); 5182 SDValue N1 = N->getOperand(1); 5183 EVT VT = N0.getValueType(); 5184 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 5185 5186 // fold vector ops 5187 if (VT.isVector()) { 5188 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 5189 return FoldedVOp; 5190 5191 BuildVectorSDNode *N1CV = dyn_cast<BuildVectorSDNode>(N1); 5192 // If setcc produces all-one true value then: 5193 // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV) 5194 if (N1CV && N1CV->isConstant()) { 5195 if (N0.getOpcode() == ISD::AND) { 5196 SDValue N00 = N0->getOperand(0); 5197 SDValue N01 = N0->getOperand(1); 5198 BuildVectorSDNode *N01CV = dyn_cast<BuildVectorSDNode>(N01); 5199 5200 if (N01CV && N01CV->isConstant() && N00.getOpcode() == ISD::SETCC && 5201 TLI.getBooleanContents(N00.getOperand(0).getValueType()) == 5202 TargetLowering::ZeroOrNegativeOneBooleanContent) { 5203 if (SDValue C = DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, 5204 N01CV, N1CV)) 5205 return DAG.getNode(ISD::AND, SDLoc(N), VT, N00, C); 5206 } 5207 } 5208 } 5209 } 5210 5211 ConstantSDNode *N1C = isConstOrConstSplat(N1); 5212 5213 // fold (shl c1, c2) -> c1<<c2 5214 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 5215 if (N0C && N1C && !N1C->isOpaque()) 5216 return DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N), VT, N0C, N1C); 5217 // fold (shl 0, x) -> 0 5218 if (isNullConstant(N0)) 5219 return N0; 5220 // fold (shl x, c >= size(x)) -> undef 5221 if (N1C && N1C->getAPIntValue().uge(OpSizeInBits)) 5222 return DAG.getUNDEF(VT); 5223 // fold (shl x, 0) -> x 5224 if (N1C && N1C->isNullValue()) 5225 return N0; 5226 // fold (shl undef, x) -> 0 5227 if (N0.isUndef()) 5228 return DAG.getConstant(0, SDLoc(N), VT); 5229 5230 if (SDValue NewSel = foldBinOpIntoSelect(N)) 5231 return NewSel; 5232 5233 // if (shl x, c) is known to be zero, return 0 5234 if (DAG.MaskedValueIsZero(SDValue(N, 0), 5235 APInt::getAllOnesValue(OpSizeInBits))) 5236 return DAG.getConstant(0, SDLoc(N), VT); 5237 // fold (shl x, (trunc (and y, c))) -> (shl x, (and 
(trunc y), (trunc c))). 5238 if (N1.getOpcode() == ISD::TRUNCATE && 5239 N1.getOperand(0).getOpcode() == ISD::AND) { 5240 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode())) 5241 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1); 5242 } 5243 5244 if (N1C && SimplifyDemandedBits(SDValue(N, 0))) 5245 return SDValue(N, 0); 5246 5247 // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2)) 5248 if (N1C && N0.getOpcode() == ISD::SHL) { 5249 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) { 5250 SDLoc DL(N); 5251 APInt c1 = N0C1->getAPIntValue(); 5252 APInt c2 = N1C->getAPIntValue(); 5253 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); 5254 5255 APInt Sum = c1 + c2; 5256 if (Sum.uge(OpSizeInBits)) 5257 return DAG.getConstant(0, DL, VT); 5258 5259 return DAG.getNode( 5260 ISD::SHL, DL, VT, N0.getOperand(0), 5261 DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType())); 5262 } 5263 } 5264 5265 // fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2))) 5266 // For this to be valid, the second form must not preserve any of the bits 5267 // that are shifted out by the inner shift in the first form. This means 5268 // the outer shift size must be >= the number of bits added by the ext. 5269 // As a corollary, we don't care what kind of ext it is. 5270 if (N1C && (N0.getOpcode() == ISD::ZERO_EXTEND || 5271 N0.getOpcode() == ISD::ANY_EXTEND || 5272 N0.getOpcode() == ISD::SIGN_EXTEND) && 5273 N0.getOperand(0).getOpcode() == ISD::SHL) { 5274 SDValue N0Op0 = N0.getOperand(0); 5275 if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) { 5276 APInt c1 = N0Op0C1->getAPIntValue(); 5277 APInt c2 = N1C->getAPIntValue(); 5278 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); 5279 5280 EVT InnerShiftVT = N0Op0.getValueType(); 5281 uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits(); 5282 if (c2.uge(OpSizeInBits - InnerShiftSize)) { 5283 SDLoc DL(N0); 5284 APInt Sum = c1 + c2; 5285 if (Sum.uge(OpSizeInBits)) 5286 return DAG.getConstant(0, DL, VT); 5287 5288 return DAG.getNode( 5289 ISD::SHL, DL, VT, 5290 DAG.getNode(N0.getOpcode(), DL, VT, N0Op0->getOperand(0)), 5291 DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType())); 5292 } 5293 } 5294 } 5295 5296 // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C)) 5297 // Only fold this if the inner zext has no other uses to avoid increasing 5298 // the total number of instructions. 
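  // For illustration, assuming x is an i16 zero-extended to an i32 result:
  //   (shl (zext (srl x, 4)), 4) --> (zext (shl (srl x, 4), 4))
  // i.e. both shifts end up adjacent in the narrow type, so later combines
  // (e.g. turning (shl (srl x, c), c) into a mask) can fire there before the
  // value is extended.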
5299 if (N1C && N0.getOpcode() == ISD::ZERO_EXTEND && N0.hasOneUse() && 5300 N0.getOperand(0).getOpcode() == ISD::SRL) { 5301 SDValue N0Op0 = N0.getOperand(0); 5302 if (ConstantSDNode *N0Op0C1 = isConstOrConstSplat(N0Op0.getOperand(1))) { 5303 if (N0Op0C1->getAPIntValue().ult(VT.getScalarSizeInBits())) { 5304 uint64_t c1 = N0Op0C1->getZExtValue(); 5305 uint64_t c2 = N1C->getZExtValue(); 5306 if (c1 == c2) { 5307 SDValue NewOp0 = N0.getOperand(0); 5308 EVT CountVT = NewOp0.getOperand(1).getValueType(); 5309 SDLoc DL(N); 5310 SDValue NewSHL = DAG.getNode(ISD::SHL, DL, NewOp0.getValueType(), 5311 NewOp0, 5312 DAG.getConstant(c2, DL, CountVT)); 5313 AddToWorklist(NewSHL.getNode()); 5314 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N0), VT, NewSHL); 5315 } 5316 } 5317 } 5318 } 5319 5320 // fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2 5321 // fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C2-C1)) if C1 > C2 5322 if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) && 5323 cast<BinaryWithFlagsSDNode>(N0)->Flags.hasExact()) { 5324 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) { 5325 uint64_t C1 = N0C1->getZExtValue(); 5326 uint64_t C2 = N1C->getZExtValue(); 5327 SDLoc DL(N); 5328 if (C1 <= C2) 5329 return DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0), 5330 DAG.getConstant(C2 - C1, DL, N1.getValueType())); 5331 return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0), 5332 DAG.getConstant(C1 - C2, DL, N1.getValueType())); 5333 } 5334 } 5335 5336 // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1), MASK) or 5337 // (and (srl x, (sub c1, c2), MASK) 5338 // Only fold this if the inner shift has no other uses -- if it does, folding 5339 // this will increase the total number of instructions. 5340 if (N1C && N0.getOpcode() == ISD::SRL && N0.hasOneUse()) { 5341 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) { 5342 uint64_t c1 = N0C1->getZExtValue(); 5343 if (c1 < OpSizeInBits) { 5344 uint64_t c2 = N1C->getZExtValue(); 5345 APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1); 5346 SDValue Shift; 5347 if (c2 > c1) { 5348 Mask = Mask.shl(c2 - c1); 5349 SDLoc DL(N); 5350 Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0), 5351 DAG.getConstant(c2 - c1, DL, N1.getValueType())); 5352 } else { 5353 Mask = Mask.lshr(c1 - c2); 5354 SDLoc DL(N); 5355 Shift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), 5356 DAG.getConstant(c1 - c2, DL, N1.getValueType())); 5357 } 5358 SDLoc DL(N0); 5359 return DAG.getNode(ISD::AND, DL, VT, Shift, 5360 DAG.getConstant(Mask, DL, VT)); 5361 } 5362 } 5363 } 5364 5365 // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1)) 5366 if (N0.getOpcode() == ISD::SRA && N1 == N0.getOperand(1) && 5367 isConstantOrConstantVector(N1, /* No Opaques */ true)) { 5368 SDLoc DL(N); 5369 SDValue AllBits = DAG.getAllOnesConstant(DL, VT); 5370 SDValue HiBitsMask = DAG.getNode(ISD::SHL, DL, VT, AllBits, N1); 5371 return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), HiBitsMask); 5372 } 5373 5374 // fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2) 5375 // Variant of version done on multiply, except mul by a power of 2 is turned 5376 // into a shift. 
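  // For example, with i32 operands:
  //   (shl (add x, 3), 2) --> (add (shl x, 2), 12)
  // since (x + 3) << 2 == (x << 2) + (3 << 2) for any x.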
5377 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse() && 5378 isConstantOrConstantVector(N1, /* No Opaques */ true) && 5379 isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true)) { 5380 SDValue Shl0 = DAG.getNode(ISD::SHL, SDLoc(N0), VT, N0.getOperand(0), N1); 5381 SDValue Shl1 = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1); 5382 AddToWorklist(Shl0.getNode()); 5383 AddToWorklist(Shl1.getNode()); 5384 return DAG.getNode(ISD::ADD, SDLoc(N), VT, Shl0, Shl1); 5385 } 5386 5387 // fold (shl (mul x, c1), c2) -> (mul x, c1 << c2) 5388 if (N0.getOpcode() == ISD::MUL && N0.getNode()->hasOneUse() && 5389 isConstantOrConstantVector(N1, /* No Opaques */ true) && 5390 isConstantOrConstantVector(N0.getOperand(1), /* No Opaques */ true)) { 5391 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N1), VT, N0.getOperand(1), N1); 5392 if (isConstantOrConstantVector(Shl)) 5393 return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), Shl); 5394 } 5395 5396 if (N1C && !N1C->isOpaque()) 5397 if (SDValue NewSHL = visitShiftByConstant(N, N1C)) 5398 return NewSHL; 5399 5400 return SDValue(); 5401 } 5402 5403 SDValue DAGCombiner::visitSRA(SDNode *N) { 5404 SDValue N0 = N->getOperand(0); 5405 SDValue N1 = N->getOperand(1); 5406 EVT VT = N0.getValueType(); 5407 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 5408 5409 // Arithmetic shifting an all-sign-bit value is a no-op. 5410 if (DAG.ComputeNumSignBits(N0) == OpSizeInBits) 5411 return N0; 5412 5413 // fold vector ops 5414 if (VT.isVector()) 5415 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 5416 return FoldedVOp; 5417 5418 ConstantSDNode *N1C = isConstOrConstSplat(N1); 5419 5420 // fold (sra c1, c2) -> (sra c1, c2) 5421 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 5422 if (N0C && N1C && !N1C->isOpaque()) 5423 return DAG.FoldConstantArithmetic(ISD::SRA, SDLoc(N), VT, N0C, N1C); 5424 // fold (sra 0, x) -> 0 5425 if (isNullConstant(N0)) 5426 return N0; 5427 // fold (sra -1, x) -> -1 5428 if (isAllOnesConstant(N0)) 5429 return N0; 5430 // fold (sra x, c >= size(x)) -> undef 5431 if (N1C && N1C->getAPIntValue().uge(OpSizeInBits)) 5432 return DAG.getUNDEF(VT); 5433 // fold (sra x, 0) -> x 5434 if (N1C && N1C->isNullValue()) 5435 return N0; 5436 5437 if (SDValue NewSel = foldBinOpIntoSelect(N)) 5438 return NewSel; 5439 5440 // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports 5441 // sext_inreg. 
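  // For example, on an i32 value: (sra (shl x, 24), 24) sign-extends the low
  // 8 bits of x into the full word, which is exactly
  // (sign_extend_inreg x, i8) when that operation is legal.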
  if (N1C && N0.getOpcode() == ISD::SHL && N1 == N0.getOperand(1)) {
    unsigned LowBits = OpSizeInBits - (unsigned)N1C->getZExtValue();
    EVT ExtVT = EVT::getIntegerVT(*DAG.getContext(), LowBits);
    if (VT.isVector())
      ExtVT = EVT::getVectorVT(*DAG.getContext(),
                               ExtVT, VT.getVectorNumElements());
    if ((!LegalOperations ||
         TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, ExtVT)))
      return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
                         N0.getOperand(0), DAG.getValueType(ExtVT));
  }

  // fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
  if (N1C && N0.getOpcode() == ISD::SRA) {
    if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
      SDLoc DL(N);
      APInt c1 = N0C1->getAPIntValue();
      APInt c2 = N1C->getAPIntValue();
      zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */);

      APInt Sum = c1 + c2;
      if (Sum.uge(OpSizeInBits))
        Sum = APInt(OpSizeInBits, OpSizeInBits - 1);

      return DAG.getNode(
          ISD::SRA, DL, VT, N0.getOperand(0),
          DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType()));
    }
  }

  // fold (sra (shl X, m), (sub result_size, n))
  // -> (sign_extend (trunc (shl X, (sub (sub result_size, n), m)))) for
  // result_size - n != m.
  // If truncate is free for the target, sext(shl) is likely to result in
  // better code.
  if (N0.getOpcode() == ISD::SHL && N1C) {
    // Get the two constants of the shifts, CN0 = m, CN = n.
    const ConstantSDNode *N01C = isConstOrConstSplat(N0.getOperand(1));
    if (N01C) {
      LLVMContext &Ctx = *DAG.getContext();
      // Determine what the truncate's result bitsize and type would be.
      EVT TruncVT = EVT::getIntegerVT(Ctx, OpSizeInBits - N1C->getZExtValue());

      if (VT.isVector())
        TruncVT = EVT::getVectorVT(Ctx, TruncVT, VT.getVectorNumElements());

      // Determine the residual right-shift amount.
      int ShiftAmt = N1C->getZExtValue() - N01C->getZExtValue();

      // If the shift is not a no-op (in which case this should be just a sign
      // extend already), the truncated-to type is legal, sign_extend is legal
      // on that type, and the truncate to that type is both legal and free,
      // perform the transform.
      if ((ShiftAmt > 0) &&
          TLI.isOperationLegalOrCustom(ISD::SIGN_EXTEND, TruncVT) &&
          TLI.isOperationLegalOrCustom(ISD::TRUNCATE, VT) &&
          TLI.isTruncateFree(VT, TruncVT)) {

        SDLoc DL(N);
        SDValue Amt = DAG.getConstant(ShiftAmt, DL,
            getShiftAmountTy(N0.getOperand(0).getValueType()));
        SDValue Shift = DAG.getNode(ISD::SRL, DL, VT,
                                    N0.getOperand(0), Amt);
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT,
                                    Shift);
        return DAG.getNode(ISD::SIGN_EXTEND, DL,
                           N->getValueType(0), Trunc);
      }
    }
  }

  // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
5514 if (N1.getOpcode() == ISD::TRUNCATE && 5515 N1.getOperand(0).getOpcode() == ISD::AND) { 5516 if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode())) 5517 return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1); 5518 } 5519 5520 // fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2)) 5521 // if c1 is equal to the number of bits the trunc removes 5522 if (N0.getOpcode() == ISD::TRUNCATE && 5523 (N0.getOperand(0).getOpcode() == ISD::SRL || 5524 N0.getOperand(0).getOpcode() == ISD::SRA) && 5525 N0.getOperand(0).hasOneUse() && 5526 N0.getOperand(0).getOperand(1).hasOneUse() && 5527 N1C) { 5528 SDValue N0Op0 = N0.getOperand(0); 5529 if (ConstantSDNode *LargeShift = isConstOrConstSplat(N0Op0.getOperand(1))) { 5530 unsigned LargeShiftVal = LargeShift->getZExtValue(); 5531 EVT LargeVT = N0Op0.getValueType(); 5532 5533 if (LargeVT.getScalarSizeInBits() - OpSizeInBits == LargeShiftVal) { 5534 SDLoc DL(N); 5535 SDValue Amt = 5536 DAG.getConstant(LargeShiftVal + N1C->getZExtValue(), DL, 5537 getShiftAmountTy(N0Op0.getOperand(0).getValueType())); 5538 SDValue SRA = DAG.getNode(ISD::SRA, DL, LargeVT, 5539 N0Op0.getOperand(0), Amt); 5540 return DAG.getNode(ISD::TRUNCATE, DL, VT, SRA); 5541 } 5542 } 5543 } 5544 5545 // Simplify, based on bits shifted out of the LHS. 5546 if (N1C && SimplifyDemandedBits(SDValue(N, 0))) 5547 return SDValue(N, 0); 5548 5549 5550 // If the sign bit is known to be zero, switch this to a SRL. 5551 if (DAG.SignBitIsZero(N0)) 5552 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, N1); 5553 5554 if (N1C && !N1C->isOpaque()) 5555 if (SDValue NewSRA = visitShiftByConstant(N, N1C)) 5556 return NewSRA; 5557 5558 return SDValue(); 5559 } 5560 5561 SDValue DAGCombiner::visitSRL(SDNode *N) { 5562 SDValue N0 = N->getOperand(0); 5563 SDValue N1 = N->getOperand(1); 5564 EVT VT = N0.getValueType(); 5565 unsigned OpSizeInBits = VT.getScalarSizeInBits(); 5566 5567 // fold vector ops 5568 if (VT.isVector()) 5569 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 5570 return FoldedVOp; 5571 5572 ConstantSDNode *N1C = isConstOrConstSplat(N1); 5573 5574 // fold (srl c1, c2) -> c1 >>u c2 5575 ConstantSDNode *N0C = getAsNonOpaqueConstant(N0); 5576 if (N0C && N1C && !N1C->isOpaque()) 5577 return DAG.FoldConstantArithmetic(ISD::SRL, SDLoc(N), VT, N0C, N1C); 5578 // fold (srl 0, x) -> 0 5579 if (isNullConstant(N0)) 5580 return N0; 5581 // fold (srl x, c >= size(x)) -> undef 5582 if (N1C && N1C->getAPIntValue().uge(OpSizeInBits)) 5583 return DAG.getUNDEF(VT); 5584 // fold (srl x, 0) -> x 5585 if (N1C && N1C->isNullValue()) 5586 return N0; 5587 5588 if (SDValue NewSel = foldBinOpIntoSelect(N)) 5589 return NewSel; 5590 5591 // if (srl x, c) is known to be zero, return 0 5592 if (N1C && DAG.MaskedValueIsZero(SDValue(N, 0), 5593 APInt::getAllOnesValue(OpSizeInBits))) 5594 return DAG.getConstant(0, SDLoc(N), VT); 5595 5596 // fold (srl (srl x, c1), c2) -> 0 or (srl x, (add c1, c2)) 5597 if (N1C && N0.getOpcode() == ISD::SRL) { 5598 if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) { 5599 SDLoc DL(N); 5600 APInt c1 = N0C1->getAPIntValue(); 5601 APInt c2 = N1C->getAPIntValue(); 5602 zeroExtendToMatch(c1, c2, 1 /* Overflow Bit */); 5603 5604 APInt Sum = c1 + c2; 5605 if (Sum.uge(OpSizeInBits)) 5606 return DAG.getConstant(0, DL, VT); 5607 5608 return DAG.getNode( 5609 ISD::SRL, DL, VT, N0.getOperand(0), 5610 DAG.getConstant(Sum.getZExtValue(), DL, N1.getValueType())); 5611 } 5612 } 5613 5614 // fold (srl (trunc (srl x, c1)), c2) -> 0 or (trunc (srl x, (add c1, c2))) 5615 if (N1C && 
N0.getOpcode() == ISD::TRUNCATE && 5616 N0.getOperand(0).getOpcode() == ISD::SRL && 5617 isa<ConstantSDNode>(N0.getOperand(0)->getOperand(1))) { 5618 uint64_t c1 = 5619 cast<ConstantSDNode>(N0.getOperand(0)->getOperand(1))->getZExtValue(); 5620 uint64_t c2 = N1C->getZExtValue(); 5621 EVT InnerShiftVT = N0.getOperand(0).getValueType(); 5622 EVT ShiftCountVT = N0.getOperand(0)->getOperand(1).getValueType(); 5623 uint64_t InnerShiftSize = InnerShiftVT.getScalarSizeInBits(); 5624 // This is only valid if the OpSizeInBits + c1 = size of inner shift. 5625 if (c1 + OpSizeInBits == InnerShiftSize) { 5626 SDLoc DL(N0); 5627 if (c1 + c2 >= InnerShiftSize) 5628 return DAG.getConstant(0, DL, VT); 5629 return DAG.getNode(ISD::TRUNCATE, DL, VT, 5630 DAG.getNode(ISD::SRL, DL, InnerShiftVT, 5631 N0.getOperand(0)->getOperand(0), 5632 DAG.getConstant(c1 + c2, DL, 5633 ShiftCountVT))); 5634 } 5635 } 5636 5637 // fold (srl (shl x, c), c) -> (and x, cst2) 5638 if (N0.getOpcode() == ISD::SHL && N0.getOperand(1) == N1 && 5639 isConstantOrConstantVector(N1, /* NoOpaques */ true)) { 5640 SDLoc DL(N); 5641 SDValue Mask = 5642 DAG.getNode(ISD::SRL, DL, VT, DAG.getAllOnesConstant(DL, VT), N1); 5643 AddToWorklist(Mask.getNode()); 5644 return DAG.getNode(ISD::AND, DL, VT, N0.getOperand(0), Mask); 5645 } 5646 5647 // fold (srl (anyextend x), c) -> (and (anyextend (srl x, c)), mask) 5648 if (N1C && N0.getOpcode() == ISD::ANY_EXTEND) { 5649 // Shifting in all undef bits? 5650 EVT SmallVT = N0.getOperand(0).getValueType(); 5651 unsigned BitSize = SmallVT.getScalarSizeInBits(); 5652 if (N1C->getZExtValue() >= BitSize) 5653 return DAG.getUNDEF(VT); 5654 5655 if (!LegalTypes || TLI.isTypeDesirableForOp(ISD::SRL, SmallVT)) { 5656 uint64_t ShiftAmt = N1C->getZExtValue(); 5657 SDLoc DL0(N0); 5658 SDValue SmallShift = DAG.getNode(ISD::SRL, DL0, SmallVT, 5659 N0.getOperand(0), 5660 DAG.getConstant(ShiftAmt, DL0, 5661 getShiftAmountTy(SmallVT))); 5662 AddToWorklist(SmallShift.getNode()); 5663 APInt Mask = APInt::getAllOnesValue(OpSizeInBits).lshr(ShiftAmt); 5664 SDLoc DL(N); 5665 return DAG.getNode(ISD::AND, DL, VT, 5666 DAG.getNode(ISD::ANY_EXTEND, DL, VT, SmallShift), 5667 DAG.getConstant(Mask, DL, VT)); 5668 } 5669 } 5670 5671 // fold (srl (sra X, Y), 31) -> (srl X, 31). This srl only looks at the sign 5672 // bit, which is unmodified by sra. 5673 if (N1C && N1C->getZExtValue() + 1 == OpSizeInBits) { 5674 if (N0.getOpcode() == ISD::SRA) 5675 return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0.getOperand(0), N1); 5676 } 5677 5678 // fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit). 5679 if (N1C && N0.getOpcode() == ISD::CTLZ && 5680 N1C->getAPIntValue() == Log2_32(OpSizeInBits)) { 5681 APInt KnownZero, KnownOne; 5682 DAG.computeKnownBits(N0.getOperand(0), KnownZero, KnownOne); 5683 5684 // If any of the input bits are KnownOne, then the input couldn't be all 5685 // zeros, thus the result of the srl will always be zero. 5686 if (KnownOne.getBoolValue()) return DAG.getConstant(0, SDLoc(N0), VT); 5687 5688 // If all of the bits input the to ctlz node are known to be zero, then 5689 // the result of the ctlz is "32" and the result of the shift is one. 5690 APInt UnknownBits = ~KnownZero; 5691 if (UnknownBits == 0) return DAG.getConstant(1, SDLoc(N0), VT); 5692 5693 // Otherwise, check to see if there is exactly one bit input to the ctlz. 5694 if ((UnknownBits & (UnknownBits - 1)) == 0) { 5695 // Okay, we know that only that the single bit specified by UnknownBits 5696 // could be set on input to the CTLZ node. 
      // If this bit is set, the SRL will return 0; if it is clear, it returns
      // 1. Change the CTLZ/SRL pair to an SRL/XOR pair, which is likely to
      // simplify more.
      unsigned ShAmt = UnknownBits.countTrailingZeros();
      SDValue Op = N0.getOperand(0);

      if (ShAmt) {
        SDLoc DL(N0);
        Op = DAG.getNode(ISD::SRL, DL, VT, Op,
                         DAG.getConstant(ShAmt, DL,
                                         getShiftAmountTy(Op.getValueType())));
        AddToWorklist(Op.getNode());
      }

      SDLoc DL(N);
      return DAG.getNode(ISD::XOR, DL, VT,
                         Op, DAG.getConstant(1, DL, VT));
    }
  }

  // fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
  if (N1.getOpcode() == ISD::TRUNCATE &&
      N1.getOperand(0).getOpcode() == ISD::AND) {
    if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
      return DAG.getNode(ISD::SRL, SDLoc(N), VT, N0, NewOp1);
  }

  // fold operands of srl based on knowledge that the low bits are not
  // demanded.
  if (N1C && SimplifyDemandedBits(SDValue(N, 0)))
    return SDValue(N, 0);

  if (N1C && !N1C->isOpaque())
    if (SDValue NewSRL = visitShiftByConstant(N, N1C))
      return NewSRL;

  // Attempt to convert a srl of a load into a narrower zero-extending load.
  if (SDValue NarrowLoad = ReduceLoadWidth(N))
    return NarrowLoad;

  // Here is a common situation. We want to optimize:
  //
  //   %a = ...
  //   %b = and i32 %a, 2
  //   %c = srl i32 %b, 1
  //   brcond i32 %c ...
  //
  // into
  //
  //   %a = ...
  //   %b = and %a, 2
  //   %c = setcc eq %b, 0
  //   brcond %c ...
  //
  // However, once the source operand of the SRL is optimized into an AND, the
  // SRL itself may not be optimized further. Look for it and add the BRCOND
  // into the worklist.
  if (N->hasOneUse()) {
    SDNode *Use = *N->use_begin();
    if (Use->getOpcode() == ISD::BRCOND)
      AddToWorklist(Use);
    else if (Use->getOpcode() == ISD::TRUNCATE && Use->hasOneUse()) {
      // Also look past the truncate.
5759 Use = *Use->use_begin(); 5760 if (Use->getOpcode() == ISD::BRCOND) 5761 AddToWorklist(Use); 5762 } 5763 } 5764 5765 return SDValue(); 5766 } 5767 5768 SDValue DAGCombiner::visitABS(SDNode *N) { 5769 SDValue N0 = N->getOperand(0); 5770 EVT VT = N->getValueType(0); 5771 5772 // fold (abs c1) -> c2 5773 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5774 return DAG.getNode(ISD::ABS, SDLoc(N), VT, N0); 5775 // fold (abs (abs x)) -> (abs x) 5776 if (N0.getOpcode() == ISD::ABS) 5777 return N0; 5778 // fold (abs x) -> x iff not-negative 5779 if (DAG.SignBitIsZero(N0)) 5780 return N0; 5781 return SDValue(); 5782 } 5783 5784 SDValue DAGCombiner::visitBSWAP(SDNode *N) { 5785 SDValue N0 = N->getOperand(0); 5786 EVT VT = N->getValueType(0); 5787 5788 // fold (bswap c1) -> c2 5789 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5790 return DAG.getNode(ISD::BSWAP, SDLoc(N), VT, N0); 5791 // fold (bswap (bswap x)) -> x 5792 if (N0.getOpcode() == ISD::BSWAP) 5793 return N0->getOperand(0); 5794 return SDValue(); 5795 } 5796 5797 SDValue DAGCombiner::visitBITREVERSE(SDNode *N) { 5798 SDValue N0 = N->getOperand(0); 5799 EVT VT = N->getValueType(0); 5800 5801 // fold (bitreverse c1) -> c2 5802 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5803 return DAG.getNode(ISD::BITREVERSE, SDLoc(N), VT, N0); 5804 // fold (bitreverse (bitreverse x)) -> x 5805 if (N0.getOpcode() == ISD::BITREVERSE) 5806 return N0.getOperand(0); 5807 return SDValue(); 5808 } 5809 5810 SDValue DAGCombiner::visitCTLZ(SDNode *N) { 5811 SDValue N0 = N->getOperand(0); 5812 EVT VT = N->getValueType(0); 5813 5814 // fold (ctlz c1) -> c2 5815 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5816 return DAG.getNode(ISD::CTLZ, SDLoc(N), VT, N0); 5817 return SDValue(); 5818 } 5819 5820 SDValue DAGCombiner::visitCTLZ_ZERO_UNDEF(SDNode *N) { 5821 SDValue N0 = N->getOperand(0); 5822 EVT VT = N->getValueType(0); 5823 5824 // fold (ctlz_zero_undef c1) -> c2 5825 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5826 return DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SDLoc(N), VT, N0); 5827 return SDValue(); 5828 } 5829 5830 SDValue DAGCombiner::visitCTTZ(SDNode *N) { 5831 SDValue N0 = N->getOperand(0); 5832 EVT VT = N->getValueType(0); 5833 5834 // fold (cttz c1) -> c2 5835 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5836 return DAG.getNode(ISD::CTTZ, SDLoc(N), VT, N0); 5837 return SDValue(); 5838 } 5839 5840 SDValue DAGCombiner::visitCTTZ_ZERO_UNDEF(SDNode *N) { 5841 SDValue N0 = N->getOperand(0); 5842 EVT VT = N->getValueType(0); 5843 5844 // fold (cttz_zero_undef c1) -> c2 5845 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5846 return DAG.getNode(ISD::CTTZ_ZERO_UNDEF, SDLoc(N), VT, N0); 5847 return SDValue(); 5848 } 5849 5850 SDValue DAGCombiner::visitCTPOP(SDNode *N) { 5851 SDValue N0 = N->getOperand(0); 5852 EVT VT = N->getValueType(0); 5853 5854 // fold (ctpop c1) -> c2 5855 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 5856 return DAG.getNode(ISD::CTPOP, SDLoc(N), VT, N0); 5857 return SDValue(); 5858 } 5859 5860 5861 /// \brief Generate Min/Max node 5862 static SDValue combineMinNumMaxNum(const SDLoc &DL, EVT VT, SDValue LHS, 5863 SDValue RHS, SDValue True, SDValue False, 5864 ISD::CondCode CC, const TargetLowering &TLI, 5865 SelectionDAG &DAG) { 5866 if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True)) 5867 return SDValue(); 5868 5869 switch (CC) { 5870 case ISD::SETOLT: 5871 case ISD::SETOLE: 5872 case ISD::SETLT: 5873 case ISD::SETLE: 5874 case ISD::SETULT: 5875 case ISD::SETULE: { 5876 unsigned Opcode 
= (LHS == True) ? ISD::FMINNUM : ISD::FMAXNUM; 5877 if (TLI.isOperationLegal(Opcode, VT)) 5878 return DAG.getNode(Opcode, DL, VT, LHS, RHS); 5879 return SDValue(); 5880 } 5881 case ISD::SETOGT: 5882 case ISD::SETOGE: 5883 case ISD::SETGT: 5884 case ISD::SETGE: 5885 case ISD::SETUGT: 5886 case ISD::SETUGE: { 5887 unsigned Opcode = (LHS == True) ? ISD::FMAXNUM : ISD::FMINNUM; 5888 if (TLI.isOperationLegal(Opcode, VT)) 5889 return DAG.getNode(Opcode, DL, VT, LHS, RHS); 5890 return SDValue(); 5891 } 5892 default: 5893 return SDValue(); 5894 } 5895 } 5896 5897 SDValue DAGCombiner::foldSelectOfConstants(SDNode *N) { 5898 SDValue Cond = N->getOperand(0); 5899 SDValue N1 = N->getOperand(1); 5900 SDValue N2 = N->getOperand(2); 5901 EVT VT = N->getValueType(0); 5902 EVT CondVT = Cond.getValueType(); 5903 SDLoc DL(N); 5904 5905 if (!VT.isInteger()) 5906 return SDValue(); 5907 5908 auto *C1 = dyn_cast<ConstantSDNode>(N1); 5909 auto *C2 = dyn_cast<ConstantSDNode>(N2); 5910 if (!C1 || !C2) 5911 return SDValue(); 5912 5913 // Only do this before legalization to avoid conflicting with target-specific 5914 // transforms in the other direction (create a select from a zext/sext). There 5915 // is also a target-independent combine here in DAGCombiner in the other 5916 // direction for (select Cond, -1, 0) when the condition is not i1. 5917 if (CondVT == MVT::i1 && !LegalOperations) { 5918 if (C1->isNullValue() && C2->isOne()) { 5919 // select Cond, 0, 1 --> zext (!Cond) 5920 SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1); 5921 if (VT != MVT::i1) 5922 NotCond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, NotCond); 5923 return NotCond; 5924 } 5925 if (C1->isNullValue() && C2->isAllOnesValue()) { 5926 // select Cond, 0, -1 --> sext (!Cond) 5927 SDValue NotCond = DAG.getNOT(DL, Cond, MVT::i1); 5928 if (VT != MVT::i1) 5929 NotCond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, NotCond); 5930 return NotCond; 5931 } 5932 if (C1->isOne() && C2->isNullValue()) { 5933 // select Cond, 1, 0 --> zext (Cond) 5934 if (VT != MVT::i1) 5935 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond); 5936 return Cond; 5937 } 5938 if (C1->isAllOnesValue() && C2->isNullValue()) { 5939 // select Cond, -1, 0 --> sext (Cond) 5940 if (VT != MVT::i1) 5941 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond); 5942 return Cond; 5943 } 5944 5945 // For any constants that differ by 1, we can transform the select into an 5946 // extend and add. Use a target hook because some targets may prefer to 5947 // transform in the other direction. 5948 if (TLI.convertSelectOfConstantsToMath()) { 5949 if (C1->getAPIntValue() - 1 == C2->getAPIntValue()) { 5950 // select Cond, C1, C1-1 --> add (zext Cond), C1-1 5951 if (VT != MVT::i1) 5952 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond); 5953 return DAG.getNode(ISD::ADD, DL, VT, Cond, N2); 5954 } 5955 if (C1->getAPIntValue() + 1 == C2->getAPIntValue()) { 5956 // select Cond, C1, C1+1 --> add (sext Cond), C1+1 5957 if (VT != MVT::i1) 5958 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond); 5959 return DAG.getNode(ISD::ADD, DL, VT, Cond, N2); 5960 } 5961 } 5962 5963 return SDValue(); 5964 } 5965 5966 // fold (select Cond, 0, 1) -> (xor Cond, 1) 5967 // We can't do this reliably if integer based booleans have different contents 5968 // to floating point based booleans. This is because we can't tell whether we 5969 // have an integer-based boolean or a floating-point-based boolean unless we 5970 // can find the SETCC that produced it and inspect its operands. 
This is 5971 // fairly easy if C is the SETCC node, but it can potentially be 5972 // undiscoverable (or not reasonably discoverable). For example, it could be 5973 // in another basic block or it could require searching a complicated 5974 // expression. 5975 if (CondVT.isInteger() && 5976 TLI.getBooleanContents(false, true) == 5977 TargetLowering::ZeroOrOneBooleanContent && 5978 TLI.getBooleanContents(false, false) == 5979 TargetLowering::ZeroOrOneBooleanContent && 5980 C1->isNullValue() && C2->isOne()) { 5981 SDValue NotCond = 5982 DAG.getNode(ISD::XOR, DL, CondVT, Cond, DAG.getConstant(1, DL, CondVT)); 5983 if (VT.bitsEq(CondVT)) 5984 return NotCond; 5985 return DAG.getZExtOrTrunc(NotCond, DL, VT); 5986 } 5987 5988 return SDValue(); 5989 } 5990 5991 SDValue DAGCombiner::visitSELECT(SDNode *N) { 5992 SDValue N0 = N->getOperand(0); 5993 SDValue N1 = N->getOperand(1); 5994 SDValue N2 = N->getOperand(2); 5995 EVT VT = N->getValueType(0); 5996 EVT VT0 = N0.getValueType(); 5997 5998 // fold (select C, X, X) -> X 5999 if (N1 == N2) 6000 return N1; 6001 if (const ConstantSDNode *N0C = dyn_cast<const ConstantSDNode>(N0)) { 6002 // fold (select true, X, Y) -> X 6003 // fold (select false, X, Y) -> Y 6004 return !N0C->isNullValue() ? N1 : N2; 6005 } 6006 // fold (select X, X, Y) -> (or X, Y) 6007 // fold (select X, 1, Y) -> (or C, Y) 6008 if (VT == VT0 && VT == MVT::i1 && (N0 == N1 || isOneConstant(N1))) 6009 return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2); 6010 6011 if (SDValue V = foldSelectOfConstants(N)) 6012 return V; 6013 6014 // fold (select C, 0, X) -> (and (not C), X) 6015 if (VT == VT0 && VT == MVT::i1 && isNullConstant(N1)) { 6016 SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT); 6017 AddToWorklist(NOTNode.getNode()); 6018 return DAG.getNode(ISD::AND, SDLoc(N), VT, NOTNode, N2); 6019 } 6020 // fold (select C, X, 1) -> (or (not C), X) 6021 if (VT == VT0 && VT == MVT::i1 && isOneConstant(N2)) { 6022 SDValue NOTNode = DAG.getNOT(SDLoc(N0), N0, VT); 6023 AddToWorklist(NOTNode.getNode()); 6024 return DAG.getNode(ISD::OR, SDLoc(N), VT, NOTNode, N1); 6025 } 6026 // fold (select X, Y, X) -> (and X, Y) 6027 // fold (select X, Y, 0) -> (and X, Y) 6028 if (VT == VT0 && VT == MVT::i1 && (N0 == N2 || isNullConstant(N2))) 6029 return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1); 6030 6031 // If we can fold this based on the true/false value, do so. 6032 if (SimplifySelectOps(N, N1, N2)) 6033 return SDValue(N, 0); // Don't revisit N. 6034 6035 if (VT0 == MVT::i1) { 6036 // The code in this block deals with the following 2 equivalences: 6037 // select(C0|C1, x, y) <=> select(C0, x, select(C1, x, y)) 6038 // select(C0&C1, x, y) <=> select(C0, select(C1, x, y), y) 6039 // The target can specify its preferred form with the 6040 // shouldNormalizeToSelectSequence() callback. However we always transform 6041 // to the right anyway if we find the inner select exists in the DAG anyway 6042 // and we always transform to the left side if we know that we can further 6043 // optimize the combination of the conditions. 
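    // For example, with i1 conditions, select (and C0, C1), X, Y produces X
    // only when both conditions are true, which is exactly what the nested
    // form select C0, (select C1, X, Y), Y computes; the or case likewise
    // produces Y only when both conditions are false.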
6044 bool normalizeToSequence 6045 = TLI.shouldNormalizeToSelectSequence(*DAG.getContext(), VT); 6046 // select (and Cond0, Cond1), X, Y 6047 // -> select Cond0, (select Cond1, X, Y), Y 6048 if (N0->getOpcode() == ISD::AND && N0->hasOneUse()) { 6049 SDValue Cond0 = N0->getOperand(0); 6050 SDValue Cond1 = N0->getOperand(1); 6051 SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N), 6052 N1.getValueType(), Cond1, N1, N2); 6053 if (normalizeToSequence || !InnerSelect.use_empty()) 6054 return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0, 6055 InnerSelect, N2); 6056 } 6057 // select (or Cond0, Cond1), X, Y -> select Cond0, X, (select Cond1, X, Y) 6058 if (N0->getOpcode() == ISD::OR && N0->hasOneUse()) { 6059 SDValue Cond0 = N0->getOperand(0); 6060 SDValue Cond1 = N0->getOperand(1); 6061 SDValue InnerSelect = DAG.getNode(ISD::SELECT, SDLoc(N), 6062 N1.getValueType(), Cond1, N1, N2); 6063 if (normalizeToSequence || !InnerSelect.use_empty()) 6064 return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Cond0, N1, 6065 InnerSelect); 6066 } 6067 6068 // select Cond0, (select Cond1, X, Y), Y -> select (and Cond0, Cond1), X, Y 6069 if (N1->getOpcode() == ISD::SELECT && N1->hasOneUse()) { 6070 SDValue N1_0 = N1->getOperand(0); 6071 SDValue N1_1 = N1->getOperand(1); 6072 SDValue N1_2 = N1->getOperand(2); 6073 if (N1_2 == N2 && N0.getValueType() == N1_0.getValueType()) { 6074 // Create the actual and node if we can generate good code for it. 6075 if (!normalizeToSequence) { 6076 SDValue And = DAG.getNode(ISD::AND, SDLoc(N), N0.getValueType(), 6077 N0, N1_0); 6078 return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), And, 6079 N1_1, N2); 6080 } 6081 // Otherwise see if we can optimize the "and" to a better pattern. 6082 if (SDValue Combined = visitANDLike(N0, N1_0, N)) 6083 return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Combined, 6084 N1_1, N2); 6085 } 6086 } 6087 // select Cond0, X, (select Cond1, X, Y) -> select (or Cond0, Cond1), X, Y 6088 if (N2->getOpcode() == ISD::SELECT && N2->hasOneUse()) { 6089 SDValue N2_0 = N2->getOperand(0); 6090 SDValue N2_1 = N2->getOperand(1); 6091 SDValue N2_2 = N2->getOperand(2); 6092 if (N2_1 == N1 && N0.getValueType() == N2_0.getValueType()) { 6093 // Create the actual or node if we can generate good code for it. 6094 if (!normalizeToSequence) { 6095 SDValue Or = DAG.getNode(ISD::OR, SDLoc(N), N0.getValueType(), 6096 N0, N2_0); 6097 return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Or, 6098 N1, N2_2); 6099 } 6100 // Otherwise see if we can optimize to a better pattern. 6101 if (SDValue Combined = visitORLike(N0, N2_0, N)) 6102 return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), Combined, 6103 N1, N2_2); 6104 } 6105 } 6106 } 6107 6108 // select (xor Cond, 1), X, Y -> select Cond, Y, X 6109 if (VT0 == MVT::i1) { 6110 if (N0->getOpcode() == ISD::XOR) { 6111 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) { 6112 SDValue Cond0 = N0->getOperand(0); 6113 if (C->isOne()) 6114 return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(), 6115 Cond0, N2, N1); 6116 } 6117 } 6118 } 6119 6120 // fold selects based on a setcc into other things, such as min/max/abs 6121 if (N0.getOpcode() == ISD::SETCC) { 6122 // select x, y (fcmp lt x, y) -> fminnum x, y 6123 // select x, y (fcmp gt x, y) -> fmaxnum x, y 6124 // 6125 // This is OK if we don't care about what happens if either operand is a 6126 // NaN. 
6127 // 6128 6129 // FIXME: Instead of testing for UnsafeFPMath, this should be checking for 6130 // no signed zeros as well as no nans. 6131 const TargetOptions &Options = DAG.getTarget().Options; 6132 if (Options.UnsafeFPMath && 6133 VT.isFloatingPoint() && N0.hasOneUse() && 6134 DAG.isKnownNeverNaN(N1) && DAG.isKnownNeverNaN(N2)) { 6135 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 6136 6137 if (SDValue FMinMax = combineMinNumMaxNum(SDLoc(N), VT, N0.getOperand(0), 6138 N0.getOperand(1), N1, N2, CC, 6139 TLI, DAG)) 6140 return FMinMax; 6141 } 6142 6143 if ((!LegalOperations && 6144 TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT)) || 6145 TLI.isOperationLegal(ISD::SELECT_CC, VT)) 6146 return DAG.getNode(ISD::SELECT_CC, SDLoc(N), VT, 6147 N0.getOperand(0), N0.getOperand(1), 6148 N1, N2, N0.getOperand(2)); 6149 return SimplifySelect(SDLoc(N), N0, N1, N2); 6150 } 6151 6152 return SDValue(); 6153 } 6154 6155 static 6156 std::pair<SDValue, SDValue> SplitVSETCC(const SDNode *N, SelectionDAG &DAG) { 6157 SDLoc DL(N); 6158 EVT LoVT, HiVT; 6159 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 6160 6161 // Split the inputs. 6162 SDValue Lo, Hi, LL, LH, RL, RH; 6163 std::tie(LL, LH) = DAG.SplitVectorOperand(N, 0); 6164 std::tie(RL, RH) = DAG.SplitVectorOperand(N, 1); 6165 6166 Lo = DAG.getNode(N->getOpcode(), DL, LoVT, LL, RL, N->getOperand(2)); 6167 Hi = DAG.getNode(N->getOpcode(), DL, HiVT, LH, RH, N->getOperand(2)); 6168 6169 return std::make_pair(Lo, Hi); 6170 } 6171 6172 // This function assumes all the vselect's arguments are CONCAT_VECTOR 6173 // nodes and that the condition is a BV of ConstantSDNodes (or undefs). 6174 static SDValue ConvertSelectToConcatVector(SDNode *N, SelectionDAG &DAG) { 6175 SDLoc DL(N); 6176 SDValue Cond = N->getOperand(0); 6177 SDValue LHS = N->getOperand(1); 6178 SDValue RHS = N->getOperand(2); 6179 EVT VT = N->getValueType(0); 6180 int NumElems = VT.getVectorNumElements(); 6181 assert(LHS.getOpcode() == ISD::CONCAT_VECTORS && 6182 RHS.getOpcode() == ISD::CONCAT_VECTORS && 6183 Cond.getOpcode() == ISD::BUILD_VECTOR); 6184 6185 // CONCAT_VECTOR can take an arbitrary number of arguments. We only care about 6186 // binary ones here. 6187 if (LHS->getNumOperands() != 2 || RHS->getNumOperands() != 2) 6188 return SDValue(); 6189 6190 // We're sure we have an even number of elements due to the 6191 // concat_vectors we have as arguments to vselect. 6192 // Skip BV elements until we find one that's not an UNDEF 6193 // After we find an UNDEF element, keep looping until we get to half the 6194 // length of the BV and see if all the non-undef nodes are the same. 6195 ConstantSDNode *BottomHalf = nullptr; 6196 for (int i = 0; i < NumElems / 2; ++i) { 6197 if (Cond->getOperand(i)->isUndef()) 6198 continue; 6199 6200 if (BottomHalf == nullptr) 6201 BottomHalf = cast<ConstantSDNode>(Cond.getOperand(i)); 6202 else if (Cond->getOperand(i).getNode() != BottomHalf) 6203 return SDValue(); 6204 } 6205 6206 // Do the same for the second half of the BuildVector 6207 ConstantSDNode *TopHalf = nullptr; 6208 for (int i = NumElems / 2; i < NumElems; ++i) { 6209 if (Cond->getOperand(i)->isUndef()) 6210 continue; 6211 6212 if (TopHalf == nullptr) 6213 TopHalf = cast<ConstantSDNode>(Cond.getOperand(i)); 6214 else if (Cond->getOperand(i).getNode() != TopHalf) 6215 return SDValue(); 6216 } 6217 6218 assert(TopHalf && BottomHalf && 6219 "One half of the selector was all UNDEFs and the other was all the " 6220 "same value. 
This should have been addressed before this function."); 6221 return DAG.getNode( 6222 ISD::CONCAT_VECTORS, DL, VT, 6223 BottomHalf->isNullValue() ? RHS->getOperand(0) : LHS->getOperand(0), 6224 TopHalf->isNullValue() ? RHS->getOperand(1) : LHS->getOperand(1)); 6225 } 6226 6227 SDValue DAGCombiner::visitMSCATTER(SDNode *N) { 6228 6229 if (Level >= AfterLegalizeTypes) 6230 return SDValue(); 6231 6232 MaskedScatterSDNode *MSC = cast<MaskedScatterSDNode>(N); 6233 SDValue Mask = MSC->getMask(); 6234 SDValue Data = MSC->getValue(); 6235 SDLoc DL(N); 6236 6237 // If the MSCATTER data type requires splitting and the mask is provided by a 6238 // SETCC, then split both nodes and its operands before legalization. This 6239 // prevents the type legalizer from unrolling SETCC into scalar comparisons 6240 // and enables future optimizations (e.g. min/max pattern matching on X86). 6241 if (Mask.getOpcode() != ISD::SETCC) 6242 return SDValue(); 6243 6244 // Check if any splitting is required. 6245 if (TLI.getTypeAction(*DAG.getContext(), Data.getValueType()) != 6246 TargetLowering::TypeSplitVector) 6247 return SDValue(); 6248 SDValue MaskLo, MaskHi, Lo, Hi; 6249 std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG); 6250 6251 EVT LoVT, HiVT; 6252 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MSC->getValueType(0)); 6253 6254 SDValue Chain = MSC->getChain(); 6255 6256 EVT MemoryVT = MSC->getMemoryVT(); 6257 unsigned Alignment = MSC->getOriginalAlignment(); 6258 6259 EVT LoMemVT, HiMemVT; 6260 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT); 6261 6262 SDValue DataLo, DataHi; 6263 std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL); 6264 6265 SDValue BasePtr = MSC->getBasePtr(); 6266 SDValue IndexLo, IndexHi; 6267 std::tie(IndexLo, IndexHi) = DAG.SplitVector(MSC->getIndex(), DL); 6268 6269 MachineMemOperand *MMO = DAG.getMachineFunction(). 6270 getMachineMemOperand(MSC->getPointerInfo(), 6271 MachineMemOperand::MOStore, LoMemVT.getStoreSize(), 6272 Alignment, MSC->getAAInfo(), MSC->getRanges()); 6273 6274 SDValue OpsLo[] = { Chain, DataLo, MaskLo, BasePtr, IndexLo }; 6275 Lo = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataLo.getValueType(), 6276 DL, OpsLo, MMO); 6277 6278 SDValue OpsHi[] = {Chain, DataHi, MaskHi, BasePtr, IndexHi}; 6279 Hi = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), DataHi.getValueType(), 6280 DL, OpsHi, MMO); 6281 6282 AddToWorklist(Lo.getNode()); 6283 AddToWorklist(Hi.getNode()); 6284 6285 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi); 6286 } 6287 6288 SDValue DAGCombiner::visitMSTORE(SDNode *N) { 6289 6290 if (Level >= AfterLegalizeTypes) 6291 return SDValue(); 6292 6293 MaskedStoreSDNode *MST = dyn_cast<MaskedStoreSDNode>(N); 6294 SDValue Mask = MST->getMask(); 6295 SDValue Data = MST->getValue(); 6296 EVT VT = Data.getValueType(); 6297 SDLoc DL(N); 6298 6299 // If the MSTORE data type requires splitting and the mask is provided by a 6300 // SETCC, then split both nodes and its operands before legalization. This 6301 // prevents the type legalizer from unrolling SETCC into scalar comparisons 6302 // and enables future optimizations (e.g. min/max pattern matching on X86). 6303 if (Mask.getOpcode() == ISD::SETCC) { 6304 6305 // Check if any splitting is required. 
6306 if (TLI.getTypeAction(*DAG.getContext(), VT) != 6307 TargetLowering::TypeSplitVector) 6308 return SDValue(); 6309 6310 SDValue MaskLo, MaskHi, Lo, Hi; 6311 std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG); 6312 6313 SDValue Chain = MST->getChain(); 6314 SDValue Ptr = MST->getBasePtr(); 6315 6316 EVT MemoryVT = MST->getMemoryVT(); 6317 unsigned Alignment = MST->getOriginalAlignment(); 6318 6319 // if Alignment is equal to the vector size, 6320 // take the half of it for the second part 6321 unsigned SecondHalfAlignment = 6322 (Alignment == VT.getSizeInBits() / 8) ? Alignment / 2 : Alignment; 6323 6324 EVT LoMemVT, HiMemVT; 6325 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT); 6326 6327 SDValue DataLo, DataHi; 6328 std::tie(DataLo, DataHi) = DAG.SplitVector(Data, DL); 6329 6330 MachineMemOperand *MMO = DAG.getMachineFunction(). 6331 getMachineMemOperand(MST->getPointerInfo(), 6332 MachineMemOperand::MOStore, LoMemVT.getStoreSize(), 6333 Alignment, MST->getAAInfo(), MST->getRanges()); 6334 6335 Lo = DAG.getMaskedStore(Chain, DL, DataLo, Ptr, MaskLo, LoMemVT, MMO, 6336 MST->isTruncatingStore(), 6337 MST->isCompressingStore()); 6338 6339 Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG, 6340 MST->isCompressingStore()); 6341 6342 MMO = DAG.getMachineFunction(). 6343 getMachineMemOperand(MST->getPointerInfo(), 6344 MachineMemOperand::MOStore, HiMemVT.getStoreSize(), 6345 SecondHalfAlignment, MST->getAAInfo(), 6346 MST->getRanges()); 6347 6348 Hi = DAG.getMaskedStore(Chain, DL, DataHi, Ptr, MaskHi, HiMemVT, MMO, 6349 MST->isTruncatingStore(), 6350 MST->isCompressingStore()); 6351 6352 AddToWorklist(Lo.getNode()); 6353 AddToWorklist(Hi.getNode()); 6354 6355 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo, Hi); 6356 } 6357 return SDValue(); 6358 } 6359 6360 SDValue DAGCombiner::visitMGATHER(SDNode *N) { 6361 6362 if (Level >= AfterLegalizeTypes) 6363 return SDValue(); 6364 6365 MaskedGatherSDNode *MGT = dyn_cast<MaskedGatherSDNode>(N); 6366 SDValue Mask = MGT->getMask(); 6367 SDLoc DL(N); 6368 6369 // If the MGATHER result requires splitting and the mask is provided by a 6370 // SETCC, then split both nodes and its operands before legalization. This 6371 // prevents the type legalizer from unrolling SETCC into scalar comparisons 6372 // and enables future optimizations (e.g. min/max pattern matching on X86). 6373 6374 if (Mask.getOpcode() != ISD::SETCC) 6375 return SDValue(); 6376 6377 EVT VT = N->getValueType(0); 6378 6379 // Check if any splitting is required. 6380 if (TLI.getTypeAction(*DAG.getContext(), VT) != 6381 TargetLowering::TypeSplitVector) 6382 return SDValue(); 6383 6384 SDValue MaskLo, MaskHi, Lo, Hi; 6385 std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG); 6386 6387 SDValue Src0 = MGT->getValue(); 6388 SDValue Src0Lo, Src0Hi; 6389 std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(Src0, DL); 6390 6391 EVT LoVT, HiVT; 6392 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT); 6393 6394 SDValue Chain = MGT->getChain(); 6395 EVT MemoryVT = MGT->getMemoryVT(); 6396 unsigned Alignment = MGT->getOriginalAlignment(); 6397 6398 EVT LoMemVT, HiMemVT; 6399 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT); 6400 6401 SDValue BasePtr = MGT->getBasePtr(); 6402 SDValue Index = MGT->getIndex(); 6403 SDValue IndexLo, IndexHi; 6404 std::tie(IndexLo, IndexHi) = DAG.SplitVector(Index, DL); 6405 6406 MachineMemOperand *MMO = DAG.getMachineFunction(). 
6407 getMachineMemOperand(MGT->getPointerInfo(), 6408 MachineMemOperand::MOLoad, LoMemVT.getStoreSize(), 6409 Alignment, MGT->getAAInfo(), MGT->getRanges()); 6410 6411 SDValue OpsLo[] = { Chain, Src0Lo, MaskLo, BasePtr, IndexLo }; 6412 Lo = DAG.getMaskedGather(DAG.getVTList(LoVT, MVT::Other), LoVT, DL, OpsLo, 6413 MMO); 6414 6415 SDValue OpsHi[] = {Chain, Src0Hi, MaskHi, BasePtr, IndexHi}; 6416 Hi = DAG.getMaskedGather(DAG.getVTList(HiVT, MVT::Other), HiVT, DL, OpsHi, 6417 MMO); 6418 6419 AddToWorklist(Lo.getNode()); 6420 AddToWorklist(Hi.getNode()); 6421 6422 // Build a factor node to remember that this load is independent of the 6423 // other one. 6424 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1), 6425 Hi.getValue(1)); 6426 6427 // Legalized the chain result - switch anything that used the old chain to 6428 // use the new one. 6429 DAG.ReplaceAllUsesOfValueWith(SDValue(MGT, 1), Chain); 6430 6431 SDValue GatherRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi); 6432 6433 SDValue RetOps[] = { GatherRes, Chain }; 6434 return DAG.getMergeValues(RetOps, DL); 6435 } 6436 6437 SDValue DAGCombiner::visitMLOAD(SDNode *N) { 6438 6439 if (Level >= AfterLegalizeTypes) 6440 return SDValue(); 6441 6442 MaskedLoadSDNode *MLD = dyn_cast<MaskedLoadSDNode>(N); 6443 SDValue Mask = MLD->getMask(); 6444 SDLoc DL(N); 6445 6446 // If the MLOAD result requires splitting and the mask is provided by a 6447 // SETCC, then split both nodes and its operands before legalization. This 6448 // prevents the type legalizer from unrolling SETCC into scalar comparisons 6449 // and enables future optimizations (e.g. min/max pattern matching on X86). 6450 6451 if (Mask.getOpcode() == ISD::SETCC) { 6452 EVT VT = N->getValueType(0); 6453 6454 // Check if any splitting is required. 6455 if (TLI.getTypeAction(*DAG.getContext(), VT) != 6456 TargetLowering::TypeSplitVector) 6457 return SDValue(); 6458 6459 SDValue MaskLo, MaskHi, Lo, Hi; 6460 std::tie(MaskLo, MaskHi) = SplitVSETCC(Mask.getNode(), DAG); 6461 6462 SDValue Src0 = MLD->getSrc0(); 6463 SDValue Src0Lo, Src0Hi; 6464 std::tie(Src0Lo, Src0Hi) = DAG.SplitVector(Src0, DL); 6465 6466 EVT LoVT, HiVT; 6467 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(MLD->getValueType(0)); 6468 6469 SDValue Chain = MLD->getChain(); 6470 SDValue Ptr = MLD->getBasePtr(); 6471 EVT MemoryVT = MLD->getMemoryVT(); 6472 unsigned Alignment = MLD->getOriginalAlignment(); 6473 6474 // if Alignment is equal to the vector size, 6475 // take the half of it for the second part 6476 unsigned SecondHalfAlignment = 6477 (Alignment == MLD->getValueType(0).getSizeInBits()/8) ? 6478 Alignment/2 : Alignment; 6479 6480 EVT LoMemVT, HiMemVT; 6481 std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemoryVT); 6482 6483 MachineMemOperand *MMO = DAG.getMachineFunction(). 6484 getMachineMemOperand(MLD->getPointerInfo(), 6485 MachineMemOperand::MOLoad, LoMemVT.getStoreSize(), 6486 Alignment, MLD->getAAInfo(), MLD->getRanges()); 6487 6488 Lo = DAG.getMaskedLoad(LoVT, DL, Chain, Ptr, MaskLo, Src0Lo, LoMemVT, MMO, 6489 ISD::NON_EXTLOAD, MLD->isExpandingLoad()); 6490 6491 Ptr = TLI.IncrementMemoryAddress(Ptr, MaskLo, DL, LoMemVT, DAG, 6492 MLD->isExpandingLoad()); 6493 6494 MMO = DAG.getMachineFunction(). 
6495 getMachineMemOperand(MLD->getPointerInfo(), 6496 MachineMemOperand::MOLoad, HiMemVT.getStoreSize(), 6497 SecondHalfAlignment, MLD->getAAInfo(), MLD->getRanges()); 6498 6499 Hi = DAG.getMaskedLoad(HiVT, DL, Chain, Ptr, MaskHi, Src0Hi, HiMemVT, MMO, 6500 ISD::NON_EXTLOAD, MLD->isExpandingLoad()); 6501 6502 AddToWorklist(Lo.getNode()); 6503 AddToWorklist(Hi.getNode()); 6504 6505 // Build a factor node to remember that this load is independent of the 6506 // other one. 6507 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Lo.getValue(1), 6508 Hi.getValue(1)); 6509 6510 // Legalized the chain result - switch anything that used the old chain to 6511 // use the new one. 6512 DAG.ReplaceAllUsesOfValueWith(SDValue(MLD, 1), Chain); 6513 6514 SDValue LoadRes = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi); 6515 6516 SDValue RetOps[] = { LoadRes, Chain }; 6517 return DAG.getMergeValues(RetOps, DL); 6518 } 6519 return SDValue(); 6520 } 6521 6522 SDValue DAGCombiner::visitVSELECT(SDNode *N) { 6523 SDValue N0 = N->getOperand(0); 6524 SDValue N1 = N->getOperand(1); 6525 SDValue N2 = N->getOperand(2); 6526 SDLoc DL(N); 6527 6528 // fold (vselect C, X, X) -> X 6529 if (N1 == N2) 6530 return N1; 6531 6532 // Canonicalize integer abs. 6533 // vselect (setg[te] X, 0), X, -X -> 6534 // vselect (setgt X, -1), X, -X -> 6535 // vselect (setl[te] X, 0), -X, X -> 6536 // Y = sra (X, size(X)-1); xor (add (X, Y), Y) 6537 if (N0.getOpcode() == ISD::SETCC) { 6538 SDValue LHS = N0.getOperand(0), RHS = N0.getOperand(1); 6539 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 6540 bool isAbs = false; 6541 bool RHSIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode()); 6542 6543 if (((RHSIsAllZeros && (CC == ISD::SETGT || CC == ISD::SETGE)) || 6544 (ISD::isBuildVectorAllOnes(RHS.getNode()) && CC == ISD::SETGT)) && 6545 N1 == LHS && N2.getOpcode() == ISD::SUB && N1 == N2.getOperand(1)) 6546 isAbs = ISD::isBuildVectorAllZeros(N2.getOperand(0).getNode()); 6547 else if ((RHSIsAllZeros && (CC == ISD::SETLT || CC == ISD::SETLE)) && 6548 N2 == LHS && N1.getOpcode() == ISD::SUB && N2 == N1.getOperand(1)) 6549 isAbs = ISD::isBuildVectorAllZeros(N1.getOperand(0).getNode()); 6550 6551 if (isAbs) { 6552 EVT VT = LHS.getValueType(); 6553 SDValue Shift = DAG.getNode( 6554 ISD::SRA, DL, VT, LHS, 6555 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT)); 6556 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, LHS, Shift); 6557 AddToWorklist(Shift.getNode()); 6558 AddToWorklist(Add.getNode()); 6559 return DAG.getNode(ISD::XOR, DL, VT, Add, Shift); 6560 } 6561 } 6562 6563 if (SimplifySelectOps(N, N1, N2)) 6564 return SDValue(N, 0); // Don't revisit N. 6565 6566 // Fold (vselect (build_vector all_ones), N1, N2) -> N1 6567 if (ISD::isBuildVectorAllOnes(N0.getNode())) 6568 return N1; 6569 // Fold (vselect (build_vector all_zeros), N1, N2) -> N2 6570 if (ISD::isBuildVectorAllZeros(N0.getNode())) 6571 return N2; 6572 6573 // The ConvertSelectToConcatVector function is assuming both the above 6574 // checks for (vselect (build_vector all{ones,zeros) ...) have been made 6575 // and addressed. 
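  // Illustrative example of the fold below (vector types and operand names
  // are arbitrary). With v4i32 halves A, B, C, D and a constant condition
  // whose bottom half is all zeros and top half is all ones:
  //   (vselect <0,0,0,0,-1,-1,-1,-1>,
  //            (concat_vectors A, B), (concat_vectors C, D))
  //     --> (concat_vectors C, B)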
  if (N1.getOpcode() == ISD::CONCAT_VECTORS &&
      N2.getOpcode() == ISD::CONCAT_VECTORS &&
      ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
    if (SDValue CV = ConvertSelectToConcatVector(N, DAG))
      return CV;
  }

  return SDValue();
}

SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDValue N2 = N->getOperand(2);
  SDValue N3 = N->getOperand(3);
  SDValue N4 = N->getOperand(4);
  ISD::CondCode CC = cast<CondCodeSDNode>(N4)->get();

  // fold select_cc lhs, rhs, x, x, cc -> x
  if (N2 == N3)
    return N2;

  // Determine if the condition we're dealing with is constant.
  if (SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()), N0, N1,
                                  CC, SDLoc(N), false)) {
    AddToWorklist(SCC.getNode());

    if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
      if (!SCCC->isNullValue())
        return N2;    // cond always true -> true val
      else
        return N3;    // cond always false -> false val
    } else if (SCC->isUndef()) {
      // When the condition is UNDEF, just return the first operand. This is
      // consistent with how the DAG is created: no setcc node is created in
      // this case.
      return N2;
    } else if (SCC.getOpcode() == ISD::SETCC) {
      // Fold to a simpler select_cc.
      return DAG.getNode(ISD::SELECT_CC, SDLoc(N), N2.getValueType(),
                         SCC.getOperand(0), SCC.getOperand(1), N2, N3,
                         SCC.getOperand(2));
    }
  }

  // If we can fold this based on the true/false value, do so.
  if (SimplifySelectOps(N, N2, N3))
    return SDValue(N, 0);  // Don't revisit N.

  // fold select_cc into other things, such as min/max/abs
  return SimplifySelectCC(SDLoc(N), N0, N1, N2, N3, CC);
}

SDValue DAGCombiner::visitSETCC(SDNode *N) {
  return SimplifySetCC(N->getValueType(0), N->getOperand(0), N->getOperand(1),
                       cast<CondCodeSDNode>(N->getOperand(2))->get(),
                       SDLoc(N));
}

SDValue DAGCombiner::visitSETCCE(SDNode *N) {
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue Carry = N->getOperand(2);
  SDValue Cond = N->getOperand(3);

  // If Carry is false, fold to a regular SETCC.
  if (Carry.getOpcode() == ISD::CARRY_FALSE)
    return DAG.getNode(ISD::SETCC, SDLoc(N), N->getVTList(), LHS, RHS, Cond);

  return SDValue();
}

/// Try to fold a sext/zext/aext dag node into a ConstantSDNode or
/// a build_vector of constants.
/// This function is called by the DAGCombiner when visiting sext/zext/aext
/// dag nodes (see for example method DAGCombiner::visitSIGN_EXTEND).
/// Vector extends are not folded if operations are legal; this is to
/// avoid introducing illegal build_vector dag nodes.
6653 static SDNode *tryToFoldExtendOfConstant(SDNode *N, const TargetLowering &TLI, 6654 SelectionDAG &DAG, bool LegalTypes, 6655 bool LegalOperations) { 6656 unsigned Opcode = N->getOpcode(); 6657 SDValue N0 = N->getOperand(0); 6658 EVT VT = N->getValueType(0); 6659 6660 assert((Opcode == ISD::SIGN_EXTEND || Opcode == ISD::ZERO_EXTEND || 6661 Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG || 6662 Opcode == ISD::ZERO_EXTEND_VECTOR_INREG) 6663 && "Expected EXTEND dag node in input!"); 6664 6665 // fold (sext c1) -> c1 6666 // fold (zext c1) -> c1 6667 // fold (aext c1) -> c1 6668 if (isa<ConstantSDNode>(N0)) 6669 return DAG.getNode(Opcode, SDLoc(N), VT, N0).getNode(); 6670 6671 // fold (sext (build_vector AllConstants) -> (build_vector AllConstants) 6672 // fold (zext (build_vector AllConstants) -> (build_vector AllConstants) 6673 // fold (aext (build_vector AllConstants) -> (build_vector AllConstants) 6674 EVT SVT = VT.getScalarType(); 6675 if (!(VT.isVector() && 6676 (!LegalTypes || (!LegalOperations && TLI.isTypeLegal(SVT))) && 6677 ISD::isBuildVectorOfConstantSDNodes(N0.getNode()))) 6678 return nullptr; 6679 6680 // We can fold this node into a build_vector. 6681 unsigned VTBits = SVT.getSizeInBits(); 6682 unsigned EVTBits = N0->getValueType(0).getScalarSizeInBits(); 6683 SmallVector<SDValue, 8> Elts; 6684 unsigned NumElts = VT.getVectorNumElements(); 6685 SDLoc DL(N); 6686 6687 for (unsigned i=0; i != NumElts; ++i) { 6688 SDValue Op = N0->getOperand(i); 6689 if (Op->isUndef()) { 6690 Elts.push_back(DAG.getUNDEF(SVT)); 6691 continue; 6692 } 6693 6694 SDLoc DL(Op); 6695 // Get the constant value and if needed trunc it to the size of the type. 6696 // Nodes like build_vector might have constants wider than the scalar type. 6697 APInt C = cast<ConstantSDNode>(Op)->getAPIntValue().zextOrTrunc(EVTBits); 6698 if (Opcode == ISD::SIGN_EXTEND || Opcode == ISD::SIGN_EXTEND_VECTOR_INREG) 6699 Elts.push_back(DAG.getConstant(C.sext(VTBits), DL, SVT)); 6700 else 6701 Elts.push_back(DAG.getConstant(C.zext(VTBits), DL, SVT)); 6702 } 6703 6704 return DAG.getBuildVector(VT, DL, Elts).getNode(); 6705 } 6706 6707 // ExtendUsesToFormExtLoad - Trying to extend uses of a load to enable this: 6708 // "fold ({s|z|a}ext (load x)) -> ({s|z|a}ext (truncate ({s|z|a}extload x)))" 6709 // transformation. Returns true if extension are possible and the above 6710 // mentioned transformation is profitable. 6711 static bool ExtendUsesToFormExtLoad(SDNode *N, SDValue N0, 6712 unsigned ExtOpc, 6713 SmallVectorImpl<SDNode *> &ExtendNodes, 6714 const TargetLowering &TLI) { 6715 bool HasCopyToRegUses = false; 6716 bool isTruncFree = TLI.isTruncateFree(N->getValueType(0), N0.getValueType()); 6717 for (SDNode::use_iterator UI = N0.getNode()->use_begin(), 6718 UE = N0.getNode()->use_end(); 6719 UI != UE; ++UI) { 6720 SDNode *User = *UI; 6721 if (User == N) 6722 continue; 6723 if (UI.getUse().getResNo() != N0.getResNo()) 6724 continue; 6725 // FIXME: Only extend SETCC N, N and SETCC N, c for now. 6726 if (ExtOpc != ISD::ANY_EXTEND && User->getOpcode() == ISD::SETCC) { 6727 ISD::CondCode CC = cast<CondCodeSDNode>(User->getOperand(2))->get(); 6728 if (ExtOpc == ISD::ZERO_EXTEND && ISD::isSignedIntSetCC(CC)) 6729 // Sign bits will be lost after a zext. 
6730 return false; 6731 bool Add = false; 6732 for (unsigned i = 0; i != 2; ++i) { 6733 SDValue UseOp = User->getOperand(i); 6734 if (UseOp == N0) 6735 continue; 6736 if (!isa<ConstantSDNode>(UseOp)) 6737 return false; 6738 Add = true; 6739 } 6740 if (Add) 6741 ExtendNodes.push_back(User); 6742 continue; 6743 } 6744 // If truncates aren't free and there are users we can't 6745 // extend, it isn't worthwhile. 6746 if (!isTruncFree) 6747 return false; 6748 // Remember if this value is live-out. 6749 if (User->getOpcode() == ISD::CopyToReg) 6750 HasCopyToRegUses = true; 6751 } 6752 6753 if (HasCopyToRegUses) { 6754 bool BothLiveOut = false; 6755 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 6756 UI != UE; ++UI) { 6757 SDUse &Use = UI.getUse(); 6758 if (Use.getResNo() == 0 && Use.getUser()->getOpcode() == ISD::CopyToReg) { 6759 BothLiveOut = true; 6760 break; 6761 } 6762 } 6763 if (BothLiveOut) 6764 // Both unextended and extended values are live out. There had better be 6765 // a good reason for the transformation. 6766 return ExtendNodes.size(); 6767 } 6768 return true; 6769 } 6770 6771 void DAGCombiner::ExtendSetCCUses(const SmallVectorImpl<SDNode *> &SetCCs, 6772 SDValue Trunc, SDValue ExtLoad, 6773 const SDLoc &DL, ISD::NodeType ExtType) { 6774 // Extend SetCC uses if necessary. 6775 for (unsigned i = 0, e = SetCCs.size(); i != e; ++i) { 6776 SDNode *SetCC = SetCCs[i]; 6777 SmallVector<SDValue, 4> Ops; 6778 6779 for (unsigned j = 0; j != 2; ++j) { 6780 SDValue SOp = SetCC->getOperand(j); 6781 if (SOp == Trunc) 6782 Ops.push_back(ExtLoad); 6783 else 6784 Ops.push_back(DAG.getNode(ExtType, DL, ExtLoad->getValueType(0), SOp)); 6785 } 6786 6787 Ops.push_back(SetCC->getOperand(2)); 6788 CombineTo(SetCC, DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops)); 6789 } 6790 } 6791 6792 // FIXME: Bring more similar combines here, common to sext/zext (maybe aext?). 6793 SDValue DAGCombiner::CombineExtLoad(SDNode *N) { 6794 SDValue N0 = N->getOperand(0); 6795 EVT DstVT = N->getValueType(0); 6796 EVT SrcVT = N0.getValueType(); 6797 6798 assert((N->getOpcode() == ISD::SIGN_EXTEND || 6799 N->getOpcode() == ISD::ZERO_EXTEND) && 6800 "Unexpected node type (not an extend)!"); 6801 6802 // fold (sext (load x)) to multiple smaller sextloads; same for zext. 6803 // For example, on a target with legal v4i32, but illegal v8i32, turn: 6804 // (v8i32 (sext (v8i16 (load x)))) 6805 // into: 6806 // (v8i32 (concat_vectors (v4i32 (sextload x)), 6807 // (v4i32 (sextload (x + 16))))) 6808 // Where uses of the original load, i.e.: 6809 // (v8i16 (load x)) 6810 // are replaced with: 6811 // (v8i16 (truncate 6812 // (v8i32 (concat_vectors (v4i32 (sextload x)), 6813 // (v4i32 (sextload (x + 16))))))) 6814 // 6815 // This combine is only applicable to illegal, but splittable, vectors. 6816 // All legal types, and illegal non-vector types, are handled elsewhere. 6817 // This combine is controlled by TargetLowering::isVectorLoadExtDesirable. 
6818 // 6819 if (N0->getOpcode() != ISD::LOAD) 6820 return SDValue(); 6821 6822 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 6823 6824 if (!ISD::isNON_EXTLoad(LN0) || !ISD::isUNINDEXEDLoad(LN0) || 6825 !N0.hasOneUse() || LN0->isVolatile() || !DstVT.isVector() || 6826 !DstVT.isPow2VectorType() || !TLI.isVectorLoadExtDesirable(SDValue(N, 0))) 6827 return SDValue(); 6828 6829 SmallVector<SDNode *, 4> SetCCs; 6830 if (!ExtendUsesToFormExtLoad(N, N0, N->getOpcode(), SetCCs, TLI)) 6831 return SDValue(); 6832 6833 ISD::LoadExtType ExtType = 6834 N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD; 6835 6836 // Try to split the vector types to get down to legal types. 6837 EVT SplitSrcVT = SrcVT; 6838 EVT SplitDstVT = DstVT; 6839 while (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT) && 6840 SplitSrcVT.getVectorNumElements() > 1) { 6841 SplitDstVT = DAG.GetSplitDestVTs(SplitDstVT).first; 6842 SplitSrcVT = DAG.GetSplitDestVTs(SplitSrcVT).first; 6843 } 6844 6845 if (!TLI.isLoadExtLegalOrCustom(ExtType, SplitDstVT, SplitSrcVT)) 6846 return SDValue(); 6847 6848 SDLoc DL(N); 6849 const unsigned NumSplits = 6850 DstVT.getVectorNumElements() / SplitDstVT.getVectorNumElements(); 6851 const unsigned Stride = SplitSrcVT.getStoreSize(); 6852 SmallVector<SDValue, 4> Loads; 6853 SmallVector<SDValue, 4> Chains; 6854 6855 SDValue BasePtr = LN0->getBasePtr(); 6856 for (unsigned Idx = 0; Idx < NumSplits; Idx++) { 6857 const unsigned Offset = Idx * Stride; 6858 const unsigned Align = MinAlign(LN0->getAlignment(), Offset); 6859 6860 SDValue SplitLoad = DAG.getExtLoad( 6861 ExtType, DL, SplitDstVT, LN0->getChain(), BasePtr, 6862 LN0->getPointerInfo().getWithOffset(Offset), SplitSrcVT, Align, 6863 LN0->getMemOperand()->getFlags(), LN0->getAAInfo()); 6864 6865 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, 6866 DAG.getConstant(Stride, DL, BasePtr.getValueType())); 6867 6868 Loads.push_back(SplitLoad.getValue(0)); 6869 Chains.push_back(SplitLoad.getValue(1)); 6870 } 6871 6872 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 6873 SDValue NewValue = DAG.getNode(ISD::CONCAT_VECTORS, DL, DstVT, Loads); 6874 6875 // Simplify TF. 6876 AddToWorklist(NewChain.getNode()); 6877 6878 CombineTo(N, NewValue); 6879 6880 // Replace uses of the original load (before extension) 6881 // with a truncate of the concatenated sextloaded vectors. 6882 SDValue Trunc = 6883 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), NewValue); 6884 CombineTo(N0.getNode(), Trunc, NewChain); 6885 ExtendSetCCUses(SetCCs, Trunc, NewValue, DL, 6886 (ISD::NodeType)N->getOpcode()); 6887 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
6888 } 6889 6890 SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) { 6891 SDValue N0 = N->getOperand(0); 6892 EVT VT = N->getValueType(0); 6893 SDLoc DL(N); 6894 6895 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 6896 LegalOperations)) 6897 return SDValue(Res, 0); 6898 6899 // fold (sext (sext x)) -> (sext x) 6900 // fold (sext (aext x)) -> (sext x) 6901 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) 6902 return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, N0.getOperand(0)); 6903 6904 if (N0.getOpcode() == ISD::TRUNCATE) { 6905 // fold (sext (truncate (load x))) -> (sext (smaller load x)) 6906 // fold (sext (truncate (srl (load x), c))) -> (sext (smaller load (x+c/n))) 6907 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) { 6908 SDNode *oye = N0.getOperand(0).getNode(); 6909 if (NarrowLoad.getNode() != N0.getNode()) { 6910 CombineTo(N0.getNode(), NarrowLoad); 6911 // CombineTo deleted the truncate, if needed, but not what's under it. 6912 AddToWorklist(oye); 6913 } 6914 return SDValue(N, 0); // Return N so it doesn't get rechecked! 6915 } 6916 6917 // See if the value being truncated is already sign extended. If so, just 6918 // eliminate the trunc/sext pair. 6919 SDValue Op = N0.getOperand(0); 6920 unsigned OpBits = Op.getScalarValueSizeInBits(); 6921 unsigned MidBits = N0.getScalarValueSizeInBits(); 6922 unsigned DestBits = VT.getScalarSizeInBits(); 6923 unsigned NumSignBits = DAG.ComputeNumSignBits(Op); 6924 6925 if (OpBits == DestBits) { 6926 // Op is i32, Mid is i8, and Dest is i32. If Op has more than 24 sign 6927 // bits, it is already ready. 6928 if (NumSignBits > DestBits-MidBits) 6929 return Op; 6930 } else if (OpBits < DestBits) { 6931 // Op is i32, Mid is i8, and Dest is i64. If Op has more than 24 sign 6932 // bits, just sext from i32. 6933 if (NumSignBits > OpBits-MidBits) 6934 return DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op); 6935 } else { 6936 // Op is i64, Mid is i8, and Dest is i32. If Op has more than 56 sign 6937 // bits, just truncate to i32. 6938 if (NumSignBits > OpBits-MidBits) 6939 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op); 6940 } 6941 6942 // fold (sext (truncate x)) -> (sextinreg x). 6943 if (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND_INREG, 6944 N0.getValueType())) { 6945 if (OpBits < DestBits) 6946 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N0), VT, Op); 6947 else if (OpBits > DestBits) 6948 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), VT, Op); 6949 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Op, 6950 DAG.getValueType(N0.getValueType())); 6951 } 6952 } 6953 6954 // fold (sext (load x)) -> (sext (truncate (sextload x))) 6955 // Only generate vector extloads when 1) they're legal, and 2) they are 6956 // deemed desirable by the target. 
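  // For illustration (types are arbitrary): if an i8->i32 sextload is legal,
  //   (i32 (sext (i8 (load p)))) --> (i32 (sextload p))
  // and any remaining users of the original i8 load are rewritten to use
  //   (i8 (truncate (i32 (sextload p)))).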
  if (ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) &&
      ((!LegalOperations && !VT.isVector() &&
        !cast<LoadSDNode>(N0)->isVolatile()) ||
       TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, N0.getValueType()))) {
    bool DoXform = true;
    SmallVector<SDNode*, 4> SetCCs;
    if (!N0.hasOneUse())
      DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::SIGN_EXTEND, SetCCs, TLI);
    if (VT.isVector())
      DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0));
    if (DoXform) {
      LoadSDNode *LN0 = cast<LoadSDNode>(N0);
      SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, LN0->getChain(),
                                       LN0->getBasePtr(), N0.getValueType(),
                                       LN0->getMemOperand());
      CombineTo(N, ExtLoad);
      SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
                                  N0.getValueType(), ExtLoad);
      CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
      ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, ISD::SIGN_EXTEND);
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
    }
  }

  // fold (sext (load x)) to multiple smaller sextloads.
  // Only on illegal but splittable vectors.
  if (SDValue ExtLoad = CombineExtLoad(N))
    return ExtLoad;

  // fold (sext (sextload x)) -> (sext (truncate (sextload x)))
  // fold (sext ( extload x)) -> (sext (truncate (sextload x)))
  if ((ISD::isSEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) &&
      ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) {
    LoadSDNode *LN0 = cast<LoadSDNode>(N0);
    EVT MemVT = LN0->getMemoryVT();
    if ((!LegalOperations && !LN0->isVolatile()) ||
        TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, MemVT)) {
      SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, LN0->getChain(),
                                       LN0->getBasePtr(), MemVT,
                                       LN0->getMemOperand());
      CombineTo(N, ExtLoad);
      CombineTo(N0.getNode(),
                DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
                            N0.getValueType(), ExtLoad),
                ExtLoad.getValue(1));
      return SDValue(N, 0);   // Return N so it doesn't get rechecked!
7003 } 7004 } 7005 7006 // fold (sext (and/or/xor (load x), cst)) -> 7007 // (and/or/xor (sextload x), (sext cst)) 7008 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR || 7009 N0.getOpcode() == ISD::XOR) && 7010 isa<LoadSDNode>(N0.getOperand(0)) && 7011 N0.getOperand(1).getOpcode() == ISD::Constant && 7012 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, N0.getValueType()) && 7013 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { 7014 LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); 7015 if (LN0->getExtensionType() != ISD::ZEXTLOAD && LN0->isUnindexed()) { 7016 bool DoXform = true; 7017 SmallVector<SDNode*, 4> SetCCs; 7018 if (!N0.hasOneUse()) 7019 DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), ISD::SIGN_EXTEND, 7020 SetCCs, TLI); 7021 if (DoXform) { 7022 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(LN0), VT, 7023 LN0->getChain(), LN0->getBasePtr(), 7024 LN0->getMemoryVT(), 7025 LN0->getMemOperand()); 7026 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 7027 Mask = Mask.sext(VT.getSizeInBits()); 7028 SDValue And = DAG.getNode(N0.getOpcode(), DL, VT, 7029 ExtLoad, DAG.getConstant(Mask, DL, VT)); 7030 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, 7031 SDLoc(N0.getOperand(0)), 7032 N0.getOperand(0).getValueType(), ExtLoad); 7033 CombineTo(N, And); 7034 CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1)); 7035 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, ISD::SIGN_EXTEND); 7036 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7037 } 7038 } 7039 } 7040 7041 if (N0.getOpcode() == ISD::SETCC) { 7042 SDValue N00 = N0.getOperand(0); 7043 SDValue N01 = N0.getOperand(1); 7044 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 7045 EVT N00VT = N0.getOperand(0).getValueType(); 7046 7047 // sext(setcc) -> sext_in_reg(vsetcc) for vectors. 7048 // Only do this before legalize for now. 7049 if (VT.isVector() && !LegalOperations && 7050 TLI.getBooleanContents(N00VT) == 7051 TargetLowering::ZeroOrNegativeOneBooleanContent) { 7052 // On some architectures (such as SSE/NEON/etc) the SETCC result type is 7053 // of the same size as the compared operands. Only optimize sext(setcc()) 7054 // if this is the case. 7055 EVT SVT = getSetCCResultType(N00VT); 7056 7057 // We know that the # elements of the results is the same as the 7058 // # elements of the compare (and the # elements of the compare result 7059 // for that matter). Check to see that they are the same size. If so, 7060 // we know that the element size of the sext'd result matches the 7061 // element size of the compare operands. 7062 if (VT.getSizeInBits() == SVT.getSizeInBits()) 7063 return DAG.getSetCC(DL, VT, N00, N01, CC); 7064 7065 // If the desired elements are smaller or larger than the source 7066 // elements, we can use a matching integer vector type and then 7067 // truncate/sign extend. 7068 EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger(); 7069 if (SVT == MatchingVecType) { 7070 SDValue VsetCC = DAG.getSetCC(DL, MatchingVecType, N00, N01, CC); 7071 return DAG.getSExtOrTrunc(VsetCC, DL, VT); 7072 } 7073 } 7074 7075 // sext(setcc x, y, cc) -> (select (setcc x, y, cc), T, 0) 7076 // Here, T can be 1 or -1, depending on the type of the setcc and 7077 // getBooleanContents(). 7078 unsigned SetCCWidth = N0.getScalarValueSizeInBits(); 7079 7080 // To determine the "true" side of the select, we need to know the high bit 7081 // of the value returned by the setcc if it evaluates to true. 
7082 // If the type of the setcc is i1, then the true case of the select is just 7083 // sext(i1 1), that is, -1. 7084 // If the type of the setcc is larger (say, i8) then the value of the high 7085 // bit depends on getBooleanContents(), so ask TLI for a real "true" value 7086 // of the appropriate width. 7087 SDValue ExtTrueVal = (SetCCWidth == 1) ? DAG.getAllOnesConstant(DL, VT) 7088 : TLI.getConstTrueVal(DAG, VT, DL); 7089 SDValue Zero = DAG.getConstant(0, DL, VT); 7090 if (SDValue SCC = 7091 SimplifySelectCC(DL, N00, N01, ExtTrueVal, Zero, CC, true)) 7092 return SCC; 7093 7094 if (!VT.isVector()) { 7095 EVT SetCCVT = getSetCCResultType(N00VT); 7096 // Don't do this transform for i1 because there's a select transform 7097 // that would reverse it. 7098 // TODO: We should not do this transform at all without a target hook 7099 // because a sext is likely cheaper than a select? 7100 if (SetCCVT.getScalarSizeInBits() != 1 && 7101 (!LegalOperations || TLI.isOperationLegal(ISD::SETCC, N00VT))) { 7102 SDValue SetCC = DAG.getSetCC(DL, SetCCVT, N00, N01, CC); 7103 return DAG.getSelect(DL, VT, SetCC, ExtTrueVal, Zero); 7104 } 7105 } 7106 } 7107 7108 // fold (sext x) -> (zext x) if the sign bit is known zero. 7109 if ((!LegalOperations || TLI.isOperationLegal(ISD::ZERO_EXTEND, VT)) && 7110 DAG.SignBitIsZero(N0)) 7111 return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0); 7112 7113 return SDValue(); 7114 } 7115 7116 // isTruncateOf - If N is a truncate of some other value, return true, record 7117 // the value being truncated in Op and which of Op's bits are zero in KnownZero. 7118 // This function computes KnownZero to avoid a duplicated call to 7119 // computeKnownBits in the caller. 7120 static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op, 7121 APInt &KnownZero) { 7122 APInt KnownOne; 7123 if (N->getOpcode() == ISD::TRUNCATE) { 7124 Op = N->getOperand(0); 7125 DAG.computeKnownBits(Op, KnownZero, KnownOne); 7126 return true; 7127 } 7128 7129 if (N->getOpcode() != ISD::SETCC || N->getValueType(0) != MVT::i1 || 7130 cast<CondCodeSDNode>(N->getOperand(2))->get() != ISD::SETNE) 7131 return false; 7132 7133 SDValue Op0 = N->getOperand(0); 7134 SDValue Op1 = N->getOperand(1); 7135 assert(Op0.getValueType() == Op1.getValueType()); 7136 7137 if (isNullConstant(Op0)) 7138 Op = Op1; 7139 else if (isNullConstant(Op1)) 7140 Op = Op0; 7141 else 7142 return false; 7143 7144 DAG.computeKnownBits(Op, KnownZero, KnownOne); 7145 7146 if (!(KnownZero | APInt(Op.getValueSizeInBits(), 1)).isAllOnesValue()) 7147 return false; 7148 7149 return true; 7150 } 7151 7152 SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) { 7153 SDValue N0 = N->getOperand(0); 7154 EVT VT = N->getValueType(0); 7155 7156 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 7157 LegalOperations)) 7158 return SDValue(Res, 0); 7159 7160 // fold (zext (zext x)) -> (zext x) 7161 // fold (zext (aext x)) -> (zext x) 7162 if (N0.getOpcode() == ISD::ZERO_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) 7163 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, 7164 N0.getOperand(0)); 7165 7166 // fold (zext (truncate x)) -> (zext x) or 7167 // (zext (truncate x)) -> (truncate x) 7168 // This is valid when the truncated bits of x are already zero. 7169 // FIXME: We should extend this to work for vectors too. 7170 SDValue Op; 7171 APInt KnownZero; 7172 if (!VT.isVector() && isTruncateOf(DAG, N0, Op, KnownZero)) { 7173 APInt TruncatedBits = 7174 (Op.getValueSizeInBits() == N0.getValueSizeInBits()) ? 
7175 APInt(Op.getValueSizeInBits(), 0) : 7176 APInt::getBitsSet(Op.getValueSizeInBits(), 7177 N0.getValueSizeInBits(), 7178 std::min(Op.getValueSizeInBits(), 7179 VT.getSizeInBits())); 7180 if (TruncatedBits == (KnownZero & TruncatedBits)) { 7181 if (VT.bitsGT(Op.getValueType())) 7182 return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Op); 7183 if (VT.bitsLT(Op.getValueType())) 7184 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op); 7185 7186 return Op; 7187 } 7188 } 7189 7190 // fold (zext (truncate (load x))) -> (zext (smaller load x)) 7191 // fold (zext (truncate (srl (load x), c))) -> (zext (small load (x+c/n))) 7192 if (N0.getOpcode() == ISD::TRUNCATE) { 7193 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) { 7194 SDNode *oye = N0.getOperand(0).getNode(); 7195 if (NarrowLoad.getNode() != N0.getNode()) { 7196 CombineTo(N0.getNode(), NarrowLoad); 7197 // CombineTo deleted the truncate, if needed, but not what's under it. 7198 AddToWorklist(oye); 7199 } 7200 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7201 } 7202 } 7203 7204 // fold (zext (truncate x)) -> (and x, mask) 7205 if (N0.getOpcode() == ISD::TRUNCATE) { 7206 // fold (zext (truncate (load x))) -> (zext (smaller load x)) 7207 // fold (zext (truncate (srl (load x), c))) -> (zext (smaller load (x+c/n))) 7208 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) { 7209 SDNode *oye = N0.getOperand(0).getNode(); 7210 if (NarrowLoad.getNode() != N0.getNode()) { 7211 CombineTo(N0.getNode(), NarrowLoad); 7212 // CombineTo deleted the truncate, if needed, but not what's under it. 7213 AddToWorklist(oye); 7214 } 7215 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7216 } 7217 7218 EVT SrcVT = N0.getOperand(0).getValueType(); 7219 EVT MinVT = N0.getValueType(); 7220 7221 // Try to mask before the extension to avoid having to generate a larger mask, 7222 // possibly over several sub-vectors. 7223 if (SrcVT.bitsLT(VT)) { 7224 if (!LegalOperations || (TLI.isOperationLegal(ISD::AND, SrcVT) && 7225 TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) { 7226 SDValue Op = N0.getOperand(0); 7227 Op = DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType()); 7228 AddToWorklist(Op.getNode()); 7229 return DAG.getZExtOrTrunc(Op, SDLoc(N), VT); 7230 } 7231 } 7232 7233 if (!LegalOperations || TLI.isOperationLegal(ISD::AND, VT)) { 7234 SDValue Op = N0.getOperand(0); 7235 if (SrcVT.bitsLT(VT)) { 7236 Op = DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, Op); 7237 AddToWorklist(Op.getNode()); 7238 } else if (SrcVT.bitsGT(VT)) { 7239 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Op); 7240 AddToWorklist(Op.getNode()); 7241 } 7242 return DAG.getZeroExtendInReg(Op, SDLoc(N), MinVT.getScalarType()); 7243 } 7244 } 7245 7246 // Fold (zext (and (trunc x), cst)) -> (and x, cst), 7247 // if either of the casts is not free. 
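  // For example (illustrative, with x : i64):
  //   (i32 (zext (i16 (and (i16 (trunc x)), 255))))
  //     --> (i32 (and (i32 (trunc x)), 255))
  // i.e. the mask is applied directly in the wider type.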
7248 if (N0.getOpcode() == ISD::AND && 7249 N0.getOperand(0).getOpcode() == ISD::TRUNCATE && 7250 N0.getOperand(1).getOpcode() == ISD::Constant && 7251 (!TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(), 7252 N0.getValueType()) || 7253 !TLI.isZExtFree(N0.getValueType(), VT))) { 7254 SDValue X = N0.getOperand(0).getOperand(0); 7255 if (X.getValueType().bitsLT(VT)) { 7256 X = DAG.getNode(ISD::ANY_EXTEND, SDLoc(X), VT, X); 7257 } else if (X.getValueType().bitsGT(VT)) { 7258 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X); 7259 } 7260 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 7261 Mask = Mask.zext(VT.getSizeInBits()); 7262 SDLoc DL(N); 7263 return DAG.getNode(ISD::AND, DL, VT, 7264 X, DAG.getConstant(Mask, DL, VT)); 7265 } 7266 7267 // fold (zext (load x)) -> (zext (truncate (zextload x))) 7268 // Only generate vector extloads when 1) they're legal, and 2) they are 7269 // deemed desirable by the target. 7270 if (ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 7271 ((!LegalOperations && !VT.isVector() && 7272 !cast<LoadSDNode>(N0)->isVolatile()) || 7273 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, N0.getValueType()))) { 7274 bool DoXform = true; 7275 SmallVector<SDNode*, 4> SetCCs; 7276 if (!N0.hasOneUse()) 7277 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ZERO_EXTEND, SetCCs, TLI); 7278 if (VT.isVector()) 7279 DoXform &= TLI.isVectorLoadExtDesirable(SDValue(N, 0)); 7280 if (DoXform) { 7281 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7282 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, 7283 LN0->getChain(), 7284 LN0->getBasePtr(), N0.getValueType(), 7285 LN0->getMemOperand()); 7286 7287 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 7288 N0.getValueType(), ExtLoad); 7289 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 7290 7291 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 7292 ISD::ZERO_EXTEND); 7293 CombineTo(N, ExtLoad); 7294 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7295 } 7296 } 7297 7298 // fold (zext (load x)) to multiple smaller zextloads. 7299 // Only on illegal but splittable vectors. 7300 if (SDValue ExtLoad = CombineExtLoad(N)) 7301 return ExtLoad; 7302 7303 // fold (zext (and/or/xor (load x), cst)) -> 7304 // (and/or/xor (zextload x), (zext cst)) 7305 // Unless (and (load x) cst) will match as a zextload already and has 7306 // additional users. 
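  // For example (illustrative):
  //   (i32 (zext (i8 (and (i8 (load p)), 0x7f))))
  //     --> (i32 (and (i32 (zextload p)), 0x7f))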
7307 if ((N0.getOpcode() == ISD::AND || N0.getOpcode() == ISD::OR || 7308 N0.getOpcode() == ISD::XOR) && 7309 isa<LoadSDNode>(N0.getOperand(0)) && 7310 N0.getOperand(1).getOpcode() == ISD::Constant && 7311 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, N0.getValueType()) && 7312 (!LegalOperations && TLI.isOperationLegal(N0.getOpcode(), VT))) { 7313 LoadSDNode *LN0 = cast<LoadSDNode>(N0.getOperand(0)); 7314 if (LN0->getExtensionType() != ISD::SEXTLOAD && LN0->isUnindexed()) { 7315 bool DoXform = true; 7316 SmallVector<SDNode*, 4> SetCCs; 7317 if (!N0.hasOneUse()) { 7318 if (N0.getOpcode() == ISD::AND) { 7319 auto *AndC = cast<ConstantSDNode>(N0.getOperand(1)); 7320 auto NarrowLoad = false; 7321 EVT LoadResultTy = AndC->getValueType(0); 7322 EVT ExtVT, LoadedVT; 7323 if (isAndLoadExtLoad(AndC, LN0, LoadResultTy, ExtVT, LoadedVT, 7324 NarrowLoad)) 7325 DoXform = false; 7326 } 7327 if (DoXform) 7328 DoXform = ExtendUsesToFormExtLoad(N, N0.getOperand(0), 7329 ISD::ZERO_EXTEND, SetCCs, TLI); 7330 } 7331 if (DoXform) { 7332 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(LN0), VT, 7333 LN0->getChain(), LN0->getBasePtr(), 7334 LN0->getMemoryVT(), 7335 LN0->getMemOperand()); 7336 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 7337 Mask = Mask.zext(VT.getSizeInBits()); 7338 SDLoc DL(N); 7339 SDValue And = DAG.getNode(N0.getOpcode(), DL, VT, 7340 ExtLoad, DAG.getConstant(Mask, DL, VT)); 7341 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, 7342 SDLoc(N0.getOperand(0)), 7343 N0.getOperand(0).getValueType(), ExtLoad); 7344 CombineTo(N, And); 7345 CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1)); 7346 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, 7347 ISD::ZERO_EXTEND); 7348 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7349 } 7350 } 7351 } 7352 7353 // fold (zext (zextload x)) -> (zext (truncate (zextload x))) 7354 // fold (zext ( extload x)) -> (zext (truncate (zextload x))) 7355 if ((ISD::isZEXTLoad(N0.getNode()) || ISD::isEXTLoad(N0.getNode())) && 7356 ISD::isUNINDEXEDLoad(N0.getNode()) && N0.hasOneUse()) { 7357 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7358 EVT MemVT = LN0->getMemoryVT(); 7359 if ((!LegalOperations && !LN0->isVolatile()) || 7360 TLI.isLoadExtLegal(ISD::ZEXTLOAD, VT, MemVT)) { 7361 SDValue ExtLoad = DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, 7362 LN0->getChain(), 7363 LN0->getBasePtr(), MemVT, 7364 LN0->getMemOperand()); 7365 CombineTo(N, ExtLoad); 7366 CombineTo(N0.getNode(), 7367 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), N0.getValueType(), 7368 ExtLoad), 7369 ExtLoad.getValue(1)); 7370 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7371 } 7372 } 7373 7374 if (N0.getOpcode() == ISD::SETCC) { 7375 // Only do this before legalize for now. 7376 if (!LegalOperations && VT.isVector() && 7377 N0.getValueType().getVectorElementType() == MVT::i1) { 7378 EVT N00VT = N0.getOperand(0).getValueType(); 7379 if (getSetCCResultType(N00VT) == N0.getValueType()) 7380 return SDValue(); 7381 7382 // We know that the # elements of the results is the same as the # 7383 // elements of the compare (and the # elements of the compare result for 7384 // that matter). Check to see that they are the same size. If so, we know 7385 // that the element size of the sext'd result matches the element size of 7386 // the compare operands. 7387 SDLoc DL(N); 7388 SDValue VecOnes = DAG.getConstant(1, DL, VT); 7389 if (VT.getSizeInBits() == N00VT.getSizeInBits()) { 7390 // zext(setcc) -> (and (vsetcc), (1, 1, ...) for vectors. 
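        // For example (illustrative, when the setcc operands are v4i32):
        //   (v4i32 (zext (v4i1 (setcc a, b, cc))))
        //     --> (and (v4i32 (setcc a, b, cc)), <1,1,1,1>)
        // where the rebuilt setcc produces 0/-1 per lane and the AND
        // normalizes each lane to 0/1.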
7391 SDValue VSetCC = DAG.getNode(ISD::SETCC, DL, VT, N0.getOperand(0), 7392 N0.getOperand(1), N0.getOperand(2)); 7393 return DAG.getNode(ISD::AND, DL, VT, VSetCC, VecOnes); 7394 } 7395 7396 // If the desired elements are smaller or larger than the source 7397 // elements we can use a matching integer vector type and then 7398 // truncate/sign extend. 7399 EVT MatchingElementType = EVT::getIntegerVT( 7400 *DAG.getContext(), N00VT.getScalarSizeInBits()); 7401 EVT MatchingVectorType = EVT::getVectorVT( 7402 *DAG.getContext(), MatchingElementType, N00VT.getVectorNumElements()); 7403 SDValue VsetCC = 7404 DAG.getNode(ISD::SETCC, DL, MatchingVectorType, N0.getOperand(0), 7405 N0.getOperand(1), N0.getOperand(2)); 7406 return DAG.getNode(ISD::AND, DL, VT, DAG.getSExtOrTrunc(VsetCC, DL, VT), 7407 VecOnes); 7408 } 7409 7410 // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc 7411 SDLoc DL(N); 7412 if (SDValue SCC = SimplifySelectCC( 7413 DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT), 7414 DAG.getConstant(0, DL, VT), 7415 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true)) 7416 return SCC; 7417 } 7418 7419 // (zext (shl (zext x), cst)) -> (shl (zext x), cst) 7420 if ((N0.getOpcode() == ISD::SHL || N0.getOpcode() == ISD::SRL) && 7421 isa<ConstantSDNode>(N0.getOperand(1)) && 7422 N0.getOperand(0).getOpcode() == ISD::ZERO_EXTEND && 7423 N0.hasOneUse()) { 7424 SDValue ShAmt = N0.getOperand(1); 7425 unsigned ShAmtVal = cast<ConstantSDNode>(ShAmt)->getZExtValue(); 7426 if (N0.getOpcode() == ISD::SHL) { 7427 SDValue InnerZExt = N0.getOperand(0); 7428 // If the original shl may be shifting out bits, do not perform this 7429 // transformation. 7430 unsigned KnownZeroBits = InnerZExt.getValueSizeInBits() - 7431 InnerZExt.getOperand(0).getValueSizeInBits(); 7432 if (ShAmtVal > KnownZeroBits) 7433 return SDValue(); 7434 } 7435 7436 SDLoc DL(N); 7437 7438 // Ensure that the shift amount is wide enough for the shifted value. 7439 if (VT.getSizeInBits() >= 256) 7440 ShAmt = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShAmt); 7441 7442 return DAG.getNode(N0.getOpcode(), DL, VT, 7443 DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0)), 7444 ShAmt); 7445 } 7446 7447 return SDValue(); 7448 } 7449 7450 SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) { 7451 SDValue N0 = N->getOperand(0); 7452 EVT VT = N->getValueType(0); 7453 7454 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 7455 LegalOperations)) 7456 return SDValue(Res, 0); 7457 7458 // fold (aext (aext x)) -> (aext x) 7459 // fold (aext (zext x)) -> (zext x) 7460 // fold (aext (sext x)) -> (sext x) 7461 if (N0.getOpcode() == ISD::ANY_EXTEND || 7462 N0.getOpcode() == ISD::ZERO_EXTEND || 7463 N0.getOpcode() == ISD::SIGN_EXTEND) 7464 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0)); 7465 7466 // fold (aext (truncate (load x))) -> (aext (smaller load x)) 7467 // fold (aext (truncate (srl (load x), c))) -> (aext (small load (x+c/n))) 7468 if (N0.getOpcode() == ISD::TRUNCATE) { 7469 if (SDValue NarrowLoad = ReduceLoadWidth(N0.getNode())) { 7470 SDNode *oye = N0.getOperand(0).getNode(); 7471 if (NarrowLoad.getNode() != N0.getNode()) { 7472 CombineTo(N0.getNode(), NarrowLoad); 7473 // CombineTo deleted the truncate, if needed, but not what's under it. 7474 AddToWorklist(oye); 7475 } 7476 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
7477 } 7478 } 7479 7480 // fold (aext (truncate x)) 7481 if (N0.getOpcode() == ISD::TRUNCATE) { 7482 SDValue TruncOp = N0.getOperand(0); 7483 if (TruncOp.getValueType() == VT) 7484 return TruncOp; // x iff x size == zext size. 7485 if (TruncOp.getValueType().bitsGT(VT)) 7486 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, TruncOp); 7487 return DAG.getNode(ISD::ANY_EXTEND, SDLoc(N), VT, TruncOp); 7488 } 7489 7490 // Fold (aext (and (trunc x), cst)) -> (and x, cst) 7491 // if the trunc is not free. 7492 if (N0.getOpcode() == ISD::AND && 7493 N0.getOperand(0).getOpcode() == ISD::TRUNCATE && 7494 N0.getOperand(1).getOpcode() == ISD::Constant && 7495 !TLI.isTruncateFree(N0.getOperand(0).getOperand(0).getValueType(), 7496 N0.getValueType())) { 7497 SDLoc DL(N); 7498 SDValue X = N0.getOperand(0).getOperand(0); 7499 if (X.getValueType().bitsLT(VT)) { 7500 X = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X); 7501 } else if (X.getValueType().bitsGT(VT)) { 7502 X = DAG.getNode(ISD::TRUNCATE, DL, VT, X); 7503 } 7504 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue(); 7505 Mask = Mask.zext(VT.getSizeInBits()); 7506 return DAG.getNode(ISD::AND, DL, VT, 7507 X, DAG.getConstant(Mask, DL, VT)); 7508 } 7509 7510 // fold (aext (load x)) -> (aext (truncate (extload x))) 7511 // None of the supported targets knows how to perform load and any_ext 7512 // on vectors in one instruction. We only perform this transformation on 7513 // scalars. 7514 if (ISD::isNON_EXTLoad(N0.getNode()) && !VT.isVector() && 7515 ISD::isUNINDEXEDLoad(N0.getNode()) && 7516 TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) { 7517 bool DoXform = true; 7518 SmallVector<SDNode*, 4> SetCCs; 7519 if (!N0.hasOneUse()) 7520 DoXform = ExtendUsesToFormExtLoad(N, N0, ISD::ANY_EXTEND, SetCCs, TLI); 7521 if (DoXform) { 7522 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7523 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT, 7524 LN0->getChain(), 7525 LN0->getBasePtr(), N0.getValueType(), 7526 LN0->getMemOperand()); 7527 CombineTo(N, ExtLoad); 7528 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 7529 N0.getValueType(), ExtLoad); 7530 CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1)); 7531 ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), 7532 ISD::ANY_EXTEND); 7533 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7534 } 7535 } 7536 7537 // fold (aext (zextload x)) -> (aext (truncate (zextload x))) 7538 // fold (aext (sextload x)) -> (aext (truncate (sextload x))) 7539 // fold (aext ( extload x)) -> (aext (truncate (extload x))) 7540 if (N0.getOpcode() == ISD::LOAD && 7541 !ISD::isNON_EXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 7542 N0.hasOneUse()) { 7543 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7544 ISD::LoadExtType ExtType = LN0->getExtensionType(); 7545 EVT MemVT = LN0->getMemoryVT(); 7546 if (!LegalOperations || TLI.isLoadExtLegal(ExtType, VT, MemVT)) { 7547 SDValue ExtLoad = DAG.getExtLoad(ExtType, SDLoc(N), 7548 VT, LN0->getChain(), LN0->getBasePtr(), 7549 MemVT, LN0->getMemOperand()); 7550 CombineTo(N, ExtLoad); 7551 CombineTo(N0.getNode(), 7552 DAG.getNode(ISD::TRUNCATE, SDLoc(N0), 7553 N0.getValueType(), ExtLoad), 7554 ExtLoad.getValue(1)); 7555 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7556 } 7557 } 7558 7559 if (N0.getOpcode() == ISD::SETCC) { 7560 // For vectors: 7561 // aext(setcc) -> vsetcc 7562 // aext(setcc) -> truncate(vsetcc) 7563 // aext(setcc) -> aext(vsetcc) 7564 // Only do this before legalize for now. 
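    // For example (illustrative, when the setcc operands are v4i32):
    //   (v4i32 (aext (v4i1 (setcc a, b, cc)))) --> (v4i32 (setcc a, b, cc))
    // Since the high bits of an any-extended boolean are unspecified, the
    // 0/-1 lanes produced by the vector setcc can be used directly.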
7565 if (VT.isVector() && !LegalOperations) { 7566 EVT N0VT = N0.getOperand(0).getValueType(); 7567 // We know that the # elements of the results is the same as the 7568 // # elements of the compare (and the # elements of the compare result 7569 // for that matter). Check to see that they are the same size. If so, 7570 // we know that the element size of the sext'd result matches the 7571 // element size of the compare operands. 7572 if (VT.getSizeInBits() == N0VT.getSizeInBits()) 7573 return DAG.getSetCC(SDLoc(N), VT, N0.getOperand(0), 7574 N0.getOperand(1), 7575 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 7576 // If the desired elements are smaller or larger than the source 7577 // elements we can use a matching integer vector type and then 7578 // truncate/any extend 7579 else { 7580 EVT MatchingVectorType = N0VT.changeVectorElementTypeToInteger(); 7581 SDValue VsetCC = 7582 DAG.getSetCC(SDLoc(N), MatchingVectorType, N0.getOperand(0), 7583 N0.getOperand(1), 7584 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 7585 return DAG.getAnyExtOrTrunc(VsetCC, SDLoc(N), VT); 7586 } 7587 } 7588 7589 // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc 7590 SDLoc DL(N); 7591 if (SDValue SCC = SimplifySelectCC( 7592 DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT), 7593 DAG.getConstant(0, DL, VT), 7594 cast<CondCodeSDNode>(N0.getOperand(2))->get(), true)) 7595 return SCC; 7596 } 7597 7598 return SDValue(); 7599 } 7600 7601 SDValue DAGCombiner::visitAssertZext(SDNode *N) { 7602 SDValue N0 = N->getOperand(0); 7603 SDValue N1 = N->getOperand(1); 7604 EVT EVT = cast<VTSDNode>(N1)->getVT(); 7605 7606 // fold (assertzext (assertzext x, vt), vt) -> (assertzext x, vt) 7607 if (N0.getOpcode() == ISD::AssertZext && 7608 EVT == cast<VTSDNode>(N0.getOperand(1))->getVT()) 7609 return N0; 7610 7611 return SDValue(); 7612 } 7613 7614 /// See if the specified operand can be simplified with the knowledge that only 7615 /// the bits specified by Mask are used. If so, return the simpler operand, 7616 /// otherwise return a null SDValue. 7617 /// 7618 /// (This exists alongside SimplifyDemandedBits because GetDemandedBits can 7619 /// simplify nodes with multiple uses more aggressively.) 7620 SDValue DAGCombiner::GetDemandedBits(SDValue V, const APInt &Mask) { 7621 switch (V.getOpcode()) { 7622 default: break; 7623 case ISD::Constant: { 7624 const ConstantSDNode *CV = cast<ConstantSDNode>(V.getNode()); 7625 assert(CV && "Const value should be ConstSDNode."); 7626 const APInt &CVal = CV->getAPIntValue(); 7627 APInt NewVal = CVal & Mask; 7628 if (NewVal != CVal) 7629 return DAG.getConstant(NewVal, SDLoc(V), V.getValueType()); 7630 break; 7631 } 7632 case ISD::OR: 7633 case ISD::XOR: 7634 // If the LHS or RHS don't contribute bits to the or, drop them. 7635 if (DAG.MaskedValueIsZero(V.getOperand(0), Mask)) 7636 return V.getOperand(1); 7637 if (DAG.MaskedValueIsZero(V.getOperand(1), Mask)) 7638 return V.getOperand(0); 7639 break; 7640 case ISD::SRL: 7641 // Only look at single-use SRLs. 7642 if (!V.getNode()->hasOneUse()) 7643 break; 7644 if (ConstantSDNode *RHSC = getAsNonOpaqueConstant(V.getOperand(1))) { 7645 // See if we can recursively simplify the LHS. 7646 unsigned Amt = RHSC->getZExtValue(); 7647 7648 // Watch out for shift count overflow though. 
7649 if (Amt >= Mask.getBitWidth()) break;
7650 APInt NewMask = Mask << Amt;
7651 if (SDValue SimplifyLHS = GetDemandedBits(V.getOperand(0), NewMask))
7652 return DAG.getNode(ISD::SRL, SDLoc(V), V.getValueType(),
7653 SimplifyLHS, V.getOperand(1));
7654 }
7655 break;
7656 case ISD::AND: {
7657 // X & -1 -> X (ignoring bits which aren't demanded).
7658 ConstantSDNode *AndVal = isConstOrConstSplat(V.getOperand(1));
7659 if (AndVal && (AndVal->getAPIntValue() & Mask) == Mask)
7660 return V.getOperand(0);
7661 break;
7662 }
7663 }
7664 return SDValue();
7665 }
7666
7667 /// If the result of a wider load is shifted right by N bits and then
7668 /// truncated to a narrower type, where N is a multiple of the number of bits
7669 /// of the narrower type, transform it into a narrower load from
7670 /// address + N / (number of bits of the new type). If the result is to be
7671 /// extended, also fold the extension to form an extending load. For example, on a little-endian target, (i8 (trunc (srl (i32 load p), 16))) becomes (i8 load p+2).
7672 SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
7673 unsigned Opc = N->getOpcode();
7674
7675 ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
7676 SDValue N0 = N->getOperand(0);
7677 EVT VT = N->getValueType(0);
7678 EVT ExtVT = VT;
7679
7680 // This transformation isn't valid for vector loads.
7681 if (VT.isVector())
7682 return SDValue();
7683
7684 // Special case: SIGN_EXTEND_INREG is basically truncating to ExtVT then
7685 // extended to VT.
7686 if (Opc == ISD::SIGN_EXTEND_INREG) {
7687 ExtType = ISD::SEXTLOAD;
7688 ExtVT = cast<VTSDNode>(N->getOperand(1))->getVT();
7689 } else if (Opc == ISD::SRL) {
7690 // Another special-case: SRL is basically zero-extending a narrower value.
7691 ExtType = ISD::ZEXTLOAD;
7692 N0 = SDValue(N, 0);
7693 ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
7694 if (!N01) return SDValue();
7695 ExtVT = EVT::getIntegerVT(*DAG.getContext(),
7696 VT.getSizeInBits() - N01->getZExtValue());
7697 }
7698 if (LegalOperations && !TLI.isLoadExtLegal(ExtType, VT, ExtVT))
7699 return SDValue();
7700
7701 unsigned EVTBits = ExtVT.getSizeInBits();
7702
7703 // Do not generate loads of non-round integer types since these can
7704 // be expensive (and would be wrong if the type is not byte sized).
7705 if (!ExtVT.isRound())
7706 return SDValue();
7707
7708 unsigned ShAmt = 0;
7709 if (N0.getOpcode() == ISD::SRL && N0.hasOneUse()) {
7710 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) {
7711 ShAmt = N01->getZExtValue();
7712 // Is the shift amount a multiple of the size of ExtVT?
7713 if ((ShAmt & (EVTBits-1)) == 0) {
7714 N0 = N0.getOperand(0);
7715 // Is the load width a multiple of the size of ExtVT?
7716 if ((N0.getValueSizeInBits() & (EVTBits-1)) != 0)
7717 return SDValue();
7718 }
7719
7720 // At this point, we must have a load or else we can't do the transform.
7721 if (!isa<LoadSDNode>(N0)) return SDValue();
7722
7723 // Because an SRL must be assumed to *need* to zero-extend the high bits
7724 // (as opposed to anyext the high bits), we can't combine the zextload
7725 // lowering of SRL and an sextload.
7726 if (cast<LoadSDNode>(N0)->getExtensionType() == ISD::SEXTLOAD)
7727 return SDValue();
7728
7729 // If the shift amount is larger than the input type then we're not
7730 // accessing any of the loaded bytes. If the load was a zextload/extload
7731 // then the result of the shift+trunc is zero/undef (handled elsewhere).
7732 if (ShAmt >= cast<LoadSDNode>(N0)->getMemoryVT().getSizeInBits()) 7733 return SDValue(); 7734 } 7735 } 7736 7737 // If the load is shifted left (and the result isn't shifted back right), 7738 // we can fold the truncate through the shift. 7739 unsigned ShLeftAmt = 0; 7740 if (ShAmt == 0 && N0.getOpcode() == ISD::SHL && N0.hasOneUse() && 7741 ExtVT == VT && TLI.isNarrowingProfitable(N0.getValueType(), VT)) { 7742 if (ConstantSDNode *N01 = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 7743 ShLeftAmt = N01->getZExtValue(); 7744 N0 = N0.getOperand(0); 7745 } 7746 } 7747 7748 // If we haven't found a load, we can't narrow it. Don't transform one with 7749 // multiple uses, this would require adding a new load. 7750 if (!isa<LoadSDNode>(N0) || !N0.hasOneUse()) 7751 return SDValue(); 7752 7753 // Don't change the width of a volatile load. 7754 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7755 if (LN0->isVolatile()) 7756 return SDValue(); 7757 7758 // Verify that we are actually reducing a load width here. 7759 if (LN0->getMemoryVT().getSizeInBits() < EVTBits) 7760 return SDValue(); 7761 7762 // For the transform to be legal, the load must produce only two values 7763 // (the value loaded and the chain). Don't transform a pre-increment 7764 // load, for example, which produces an extra value. Otherwise the 7765 // transformation is not equivalent, and the downstream logic to replace 7766 // uses gets things wrong. 7767 if (LN0->getNumValues() > 2) 7768 return SDValue(); 7769 7770 // If the load that we're shrinking is an extload and we're not just 7771 // discarding the extension we can't simply shrink the load. Bail. 7772 // TODO: It would be possible to merge the extensions in some cases. 7773 if (LN0->getExtensionType() != ISD::NON_EXTLOAD && 7774 LN0->getMemoryVT().getSizeInBits() < ExtVT.getSizeInBits() + ShAmt) 7775 return SDValue(); 7776 7777 if (!TLI.shouldReduceLoadWidth(LN0, ExtType, ExtVT)) 7778 return SDValue(); 7779 7780 EVT PtrType = N0.getOperand(1).getValueType(); 7781 7782 if (PtrType == MVT::Untyped || PtrType.isExtended()) 7783 // It's not possible to generate a constant of extended or untyped type. 7784 return SDValue(); 7785 7786 // For big endian targets, we need to adjust the offset to the pointer to 7787 // load the correct bytes. 7788 if (DAG.getDataLayout().isBigEndian()) { 7789 unsigned LVTStoreBits = LN0->getMemoryVT().getStoreSizeInBits(); 7790 unsigned EVTStoreBits = ExtVT.getStoreSizeInBits(); 7791 ShAmt = LVTStoreBits - EVTStoreBits - ShAmt; 7792 } 7793 7794 uint64_t PtrOff = ShAmt / 8; 7795 unsigned NewAlign = MinAlign(LN0->getAlignment(), PtrOff); 7796 SDLoc DL(LN0); 7797 // The original load itself didn't wrap, so an offset within it doesn't. 7798 SDNodeFlags Flags; 7799 Flags.setNoUnsignedWrap(true); 7800 SDValue NewPtr = DAG.getNode(ISD::ADD, DL, 7801 PtrType, LN0->getBasePtr(), 7802 DAG.getConstant(PtrOff, DL, PtrType), 7803 &Flags); 7804 AddToWorklist(NewPtr.getNode()); 7805 7806 SDValue Load; 7807 if (ExtType == ISD::NON_EXTLOAD) 7808 Load = DAG.getLoad(VT, SDLoc(N0), LN0->getChain(), NewPtr, 7809 LN0->getPointerInfo().getWithOffset(PtrOff), NewAlign, 7810 LN0->getMemOperand()->getFlags(), LN0->getAAInfo()); 7811 else 7812 Load = DAG.getExtLoad(ExtType, SDLoc(N0), VT, LN0->getChain(), NewPtr, 7813 LN0->getPointerInfo().getWithOffset(PtrOff), ExtVT, 7814 NewAlign, LN0->getMemOperand()->getFlags(), 7815 LN0->getAAInfo()); 7816 7817 // Replace the old load's chain with the new load's chain. 
7818 WorklistRemover DeadNodes(*this); 7819 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1)); 7820 7821 // Shift the result left, if we've swallowed a left shift. 7822 SDValue Result = Load; 7823 if (ShLeftAmt != 0) { 7824 EVT ShImmTy = getShiftAmountTy(Result.getValueType()); 7825 if (!isUIntN(ShImmTy.getSizeInBits(), ShLeftAmt)) 7826 ShImmTy = VT; 7827 // If the shift amount is as large as the result size (but, presumably, 7828 // no larger than the source) then the useful bits of the result are 7829 // zero; we can't simply return the shortened shift, because the result 7830 // of that operation is undefined. 7831 SDLoc DL(N0); 7832 if (ShLeftAmt >= VT.getSizeInBits()) 7833 Result = DAG.getConstant(0, DL, VT); 7834 else 7835 Result = DAG.getNode(ISD::SHL, DL, VT, 7836 Result, DAG.getConstant(ShLeftAmt, DL, ShImmTy)); 7837 } 7838 7839 // Return the new loaded value. 7840 return Result; 7841 } 7842 7843 SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) { 7844 SDValue N0 = N->getOperand(0); 7845 SDValue N1 = N->getOperand(1); 7846 EVT VT = N->getValueType(0); 7847 EVT EVT = cast<VTSDNode>(N1)->getVT(); 7848 unsigned VTBits = VT.getScalarSizeInBits(); 7849 unsigned EVTBits = EVT.getScalarSizeInBits(); 7850 7851 if (N0.isUndef()) 7852 return DAG.getUNDEF(VT); 7853 7854 // fold (sext_in_reg c1) -> c1 7855 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 7856 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, N0, N1); 7857 7858 // If the input is already sign extended, just drop the extension. 7859 if (DAG.ComputeNumSignBits(N0) >= VTBits-EVTBits+1) 7860 return N0; 7861 7862 // fold (sext_in_reg (sext_in_reg x, VT2), VT1) -> (sext_in_reg x, minVT) pt2 7863 if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 7864 EVT.bitsLT(cast<VTSDNode>(N0.getOperand(1))->getVT())) 7865 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, 7866 N0.getOperand(0), N1); 7867 7868 // fold (sext_in_reg (sext x)) -> (sext x) 7869 // fold (sext_in_reg (aext x)) -> (sext x) 7870 // if x is small enough. 7871 if (N0.getOpcode() == ISD::SIGN_EXTEND || N0.getOpcode() == ISD::ANY_EXTEND) { 7872 SDValue N00 = N0.getOperand(0); 7873 if (N00.getScalarValueSizeInBits() <= EVTBits && 7874 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT))) 7875 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1); 7876 } 7877 7878 // fold (sext_in_reg (*_extend_vector_inreg x)) -> (sext_vector_in_reg x) 7879 if ((N0.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG || 7880 N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG || 7881 N0.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) && 7882 N0.getOperand(0).getScalarValueSizeInBits() == EVTBits) { 7883 if (!LegalOperations || 7884 TLI.isOperationLegal(ISD::SIGN_EXTEND_VECTOR_INREG, VT)) 7885 return DAG.getSignExtendVectorInReg(N0.getOperand(0), SDLoc(N), VT); 7886 } 7887 7888 // fold (sext_in_reg (zext x)) -> (sext x) 7889 // iff we are extending the source sign bit. 7890 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 7891 SDValue N00 = N0.getOperand(0); 7892 if (N00.getScalarValueSizeInBits() == EVTBits && 7893 (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT))) 7894 return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1); 7895 } 7896 7897 // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero. 
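// For example, (sext_in_reg (and x, 0x7f), i8): bit 7 is known zero, so the
// sign-extension is equivalent to a zero-extension of the low 8 bits.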
7898 if (DAG.MaskedValueIsZero(N0, APInt::getOneBitSet(VTBits, EVTBits - 1))) 7899 return DAG.getZeroExtendInReg(N0, SDLoc(N), EVT.getScalarType()); 7900 7901 // fold operands of sext_in_reg based on knowledge that the top bits are not 7902 // demanded. 7903 if (SimplifyDemandedBits(SDValue(N, 0))) 7904 return SDValue(N, 0); 7905 7906 // fold (sext_in_reg (load x)) -> (smaller sextload x) 7907 // fold (sext_in_reg (srl (load x), c)) -> (smaller sextload (x+c/evtbits)) 7908 if (SDValue NarrowLoad = ReduceLoadWidth(N)) 7909 return NarrowLoad; 7910 7911 // fold (sext_in_reg (srl X, 24), i8) -> (sra X, 24) 7912 // fold (sext_in_reg (srl X, 23), i8) -> (sra X, 23) iff possible. 7913 // We already fold "(sext_in_reg (srl X, 25), i8) -> srl X, 25" above. 7914 if (N0.getOpcode() == ISD::SRL) { 7915 if (ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(N0.getOperand(1))) 7916 if (ShAmt->getZExtValue()+EVTBits <= VTBits) { 7917 // We can turn this into an SRA iff the input to the SRL is already sign 7918 // extended enough. 7919 unsigned InSignBits = DAG.ComputeNumSignBits(N0.getOperand(0)); 7920 if (VTBits-(ShAmt->getZExtValue()+EVTBits) < InSignBits) 7921 return DAG.getNode(ISD::SRA, SDLoc(N), VT, 7922 N0.getOperand(0), N0.getOperand(1)); 7923 } 7924 } 7925 7926 // fold (sext_inreg (extload x)) -> (sextload x) 7927 if (ISD::isEXTLoad(N0.getNode()) && 7928 ISD::isUNINDEXEDLoad(N0.getNode()) && 7929 EVT == cast<LoadSDNode>(N0)->getMemoryVT() && 7930 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 7931 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) { 7932 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7933 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 7934 LN0->getChain(), 7935 LN0->getBasePtr(), EVT, 7936 LN0->getMemOperand()); 7937 CombineTo(N, ExtLoad); 7938 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 7939 AddToWorklist(ExtLoad.getNode()); 7940 return SDValue(N, 0); // Return N so it doesn't get rechecked! 7941 } 7942 // fold (sext_inreg (zextload x)) -> (sextload x) iff load has one use 7943 if (ISD::isZEXTLoad(N0.getNode()) && ISD::isUNINDEXEDLoad(N0.getNode()) && 7944 N0.hasOneUse() && 7945 EVT == cast<LoadSDNode>(N0)->getMemoryVT() && 7946 ((!LegalOperations && !cast<LoadSDNode>(N0)->isVolatile()) || 7947 TLI.isLoadExtLegal(ISD::SEXTLOAD, VT, EVT))) { 7948 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 7949 SDValue ExtLoad = DAG.getExtLoad(ISD::SEXTLOAD, SDLoc(N), VT, 7950 LN0->getChain(), 7951 LN0->getBasePtr(), EVT, 7952 LN0->getMemOperand()); 7953 CombineTo(N, ExtLoad); 7954 CombineTo(N0.getNode(), ExtLoad, ExtLoad.getValue(1)); 7955 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
7956 } 7957 7958 // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16)) 7959 if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) { 7960 if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0), 7961 N0.getOperand(1), false)) 7962 return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT, 7963 BSwap, N1); 7964 } 7965 7966 return SDValue(); 7967 } 7968 7969 SDValue DAGCombiner::visitSIGN_EXTEND_VECTOR_INREG(SDNode *N) { 7970 SDValue N0 = N->getOperand(0); 7971 EVT VT = N->getValueType(0); 7972 7973 if (N0.isUndef()) 7974 return DAG.getUNDEF(VT); 7975 7976 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 7977 LegalOperations)) 7978 return SDValue(Res, 0); 7979 7980 return SDValue(); 7981 } 7982 7983 SDValue DAGCombiner::visitZERO_EXTEND_VECTOR_INREG(SDNode *N) { 7984 SDValue N0 = N->getOperand(0); 7985 EVT VT = N->getValueType(0); 7986 7987 if (N0.isUndef()) 7988 return DAG.getUNDEF(VT); 7989 7990 if (SDNode *Res = tryToFoldExtendOfConstant(N, TLI, DAG, LegalTypes, 7991 LegalOperations)) 7992 return SDValue(Res, 0); 7993 7994 return SDValue(); 7995 } 7996 7997 SDValue DAGCombiner::visitTRUNCATE(SDNode *N) { 7998 SDValue N0 = N->getOperand(0); 7999 EVT VT = N->getValueType(0); 8000 bool isLE = DAG.getDataLayout().isLittleEndian(); 8001 8002 // noop truncate 8003 if (N0.getValueType() == N->getValueType(0)) 8004 return N0; 8005 // fold (truncate c1) -> c1 8006 if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) 8007 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0); 8008 // fold (truncate (truncate x)) -> (truncate x) 8009 if (N0.getOpcode() == ISD::TRUNCATE) 8010 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0)); 8011 // fold (truncate (ext x)) -> (ext x) or (truncate x) or x 8012 if (N0.getOpcode() == ISD::ZERO_EXTEND || 8013 N0.getOpcode() == ISD::SIGN_EXTEND || 8014 N0.getOpcode() == ISD::ANY_EXTEND) { 8015 // if the source is smaller than the dest, we still need an extend. 8016 if (N0.getOperand(0).getValueType().bitsLT(VT)) 8017 return DAG.getNode(N0.getOpcode(), SDLoc(N), VT, N0.getOperand(0)); 8018 // if the source is larger than the dest, than we just need the truncate. 8019 if (N0.getOperand(0).getValueType().bitsGT(VT)) 8020 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, N0.getOperand(0)); 8021 // if the source and dest are the same type, we can drop both the extend 8022 // and the truncate. 8023 return N0.getOperand(0); 8024 } 8025 8026 // If this is anyext(trunc), don't fold it, allow ourselves to be folded. 8027 if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ANY_EXTEND)) 8028 return SDValue(); 8029 8030 // Fold extract-and-trunc into a narrow extract. For example: 8031 // i64 x = EXTRACT_VECTOR_ELT(v2i64 val, i32 1) 8032 // i32 y = TRUNCATE(i64 x) 8033 // -- becomes -- 8034 // v16i8 b = BITCAST (v2i64 val) 8035 // i8 x = EXTRACT_VECTOR_ELT(v16i8 b, i32 8) 8036 // 8037 // Note: We only run this optimization after type legalization (which often 8038 // creates this pattern) and before operation legalization after which 8039 // we need to be more careful about the vector instructions that we generate. 
8040 if (N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT && 8041 LegalTypes && !LegalOperations && N0->hasOneUse() && VT != MVT::i1) { 8042 8043 EVT VecTy = N0.getOperand(0).getValueType(); 8044 EVT ExTy = N0.getValueType(); 8045 EVT TrTy = N->getValueType(0); 8046 8047 unsigned NumElem = VecTy.getVectorNumElements(); 8048 unsigned SizeRatio = ExTy.getSizeInBits()/TrTy.getSizeInBits(); 8049 8050 EVT NVT = EVT::getVectorVT(*DAG.getContext(), TrTy, SizeRatio * NumElem); 8051 assert(NVT.getSizeInBits() == VecTy.getSizeInBits() && "Invalid Size"); 8052 8053 SDValue EltNo = N0->getOperand(1); 8054 if (isa<ConstantSDNode>(EltNo) && isTypeLegal(NVT)) { 8055 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 8056 EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout()); 8057 int Index = isLE ? (Elt*SizeRatio) : (Elt*SizeRatio + (SizeRatio-1)); 8058 8059 SDLoc DL(N); 8060 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TrTy, 8061 DAG.getBitcast(NVT, N0.getOperand(0)), 8062 DAG.getConstant(Index, DL, IndexTy)); 8063 } 8064 } 8065 8066 // trunc (select c, a, b) -> select c, (trunc a), (trunc b) 8067 if (N0.getOpcode() == ISD::SELECT && N0.hasOneUse()) { 8068 EVT SrcVT = N0.getValueType(); 8069 if ((!LegalOperations || TLI.isOperationLegal(ISD::SELECT, SrcVT)) && 8070 TLI.isTruncateFree(SrcVT, VT)) { 8071 SDLoc SL(N0); 8072 SDValue Cond = N0.getOperand(0); 8073 SDValue TruncOp0 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1)); 8074 SDValue TruncOp1 = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(2)); 8075 return DAG.getNode(ISD::SELECT, SDLoc(N), VT, Cond, TruncOp0, TruncOp1); 8076 } 8077 } 8078 8079 // trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits() 8080 if (N0.getOpcode() == ISD::SHL && N0.hasOneUse() && 8081 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::SHL, VT)) && 8082 TLI.isTypeDesirableForOp(ISD::SHL, VT)) { 8083 if (const ConstantSDNode *CAmt = isConstOrConstSplat(N0.getOperand(1))) { 8084 uint64_t Amt = CAmt->getZExtValue(); 8085 unsigned Size = VT.getScalarSizeInBits(); 8086 8087 if (Amt < Size) { 8088 SDLoc SL(N); 8089 EVT AmtVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 8090 8091 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0)); 8092 return DAG.getNode(ISD::SHL, SL, VT, Trunc, 8093 DAG.getConstant(Amt, SL, AmtVT)); 8094 } 8095 } 8096 } 8097 8098 // Fold a series of buildvector, bitcast, and truncate if possible. 8099 // For example fold 8100 // (2xi32 trunc (bitcast ((4xi32)buildvector x, x, y, y) 2xi64)) to 8101 // (2xi32 (buildvector x, y)). 8102 if (Level == AfterLegalizeVectorOps && VT.isVector() && 8103 N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() && 8104 N0.getOperand(0).getOpcode() == ISD::BUILD_VECTOR && 8105 N0.getOperand(0).hasOneUse()) { 8106 8107 SDValue BuildVect = N0.getOperand(0); 8108 EVT BuildVectEltTy = BuildVect.getValueType().getVectorElementType(); 8109 EVT TruncVecEltTy = VT.getVectorElementType(); 8110 8111 // Check that the element types match. 8112 if (BuildVectEltTy == TruncVecEltTy) { 8113 // Now we only need to compute the offset of the truncated elements. 
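// In the example above, the 4 x i32 build_vector is read with a stride of
// 4 / 2 = 2, so operands 0 and 2 (x and y) form the new 2 x i32 build_vector.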
8114 unsigned BuildVecNumElts = BuildVect.getNumOperands(); 8115 unsigned TruncVecNumElts = VT.getVectorNumElements(); 8116 unsigned TruncEltOffset = BuildVecNumElts / TruncVecNumElts; 8117 8118 assert((BuildVecNumElts % TruncVecNumElts) == 0 && 8119 "Invalid number of elements"); 8120 8121 SmallVector<SDValue, 8> Opnds; 8122 for (unsigned i = 0, e = BuildVecNumElts; i != e; i += TruncEltOffset) 8123 Opnds.push_back(BuildVect.getOperand(i)); 8124 8125 return DAG.getBuildVector(VT, SDLoc(N), Opnds); 8126 } 8127 } 8128 8129 // See if we can simplify the input to this truncate through knowledge that 8130 // only the low bits are being used. 8131 // For example "trunc (or (shl x, 8), y)" // -> trunc y 8132 // Currently we only perform this optimization on scalars because vectors 8133 // may have different active low bits. 8134 if (!VT.isVector()) { 8135 if (SDValue Shorter = 8136 GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(), 8137 VT.getSizeInBits()))) 8138 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Shorter); 8139 } 8140 8141 // fold (truncate (load x)) -> (smaller load x) 8142 // fold (truncate (srl (load x), c)) -> (smaller load (x+c/evtbits)) 8143 if (!LegalTypes || TLI.isTypeDesirableForOp(N0.getOpcode(), VT)) { 8144 if (SDValue Reduced = ReduceLoadWidth(N)) 8145 return Reduced; 8146 8147 // Handle the case where the load remains an extending load even 8148 // after truncation. 8149 if (N0.hasOneUse() && ISD::isUNINDEXEDLoad(N0.getNode())) { 8150 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 8151 if (!LN0->isVolatile() && 8152 LN0->getMemoryVT().getStoreSizeInBits() < VT.getSizeInBits()) { 8153 SDValue NewLoad = DAG.getExtLoad(LN0->getExtensionType(), SDLoc(LN0), 8154 VT, LN0->getChain(), LN0->getBasePtr(), 8155 LN0->getMemoryVT(), 8156 LN0->getMemOperand()); 8157 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLoad.getValue(1)); 8158 return NewLoad; 8159 } 8160 } 8161 } 8162 8163 // fold (trunc (concat ... x ...)) -> (concat ..., (trunc x), ...)), 8164 // where ... are all 'undef'. 8165 if (N0.getOpcode() == ISD::CONCAT_VECTORS && !LegalTypes) { 8166 SmallVector<EVT, 8> VTs; 8167 SDValue V; 8168 unsigned Idx = 0; 8169 unsigned NumDefs = 0; 8170 8171 for (unsigned i = 0, e = N0.getNumOperands(); i != e; ++i) { 8172 SDValue X = N0.getOperand(i); 8173 if (!X.isUndef()) { 8174 V = X; 8175 Idx = i; 8176 NumDefs++; 8177 } 8178 // Stop if more than one members are non-undef. 8179 if (NumDefs > 1) 8180 break; 8181 VTs.push_back(EVT::getVectorVT(*DAG.getContext(), 8182 VT.getVectorElementType(), 8183 X.getValueType().getVectorNumElements())); 8184 } 8185 8186 if (NumDefs == 0) 8187 return DAG.getUNDEF(VT); 8188 8189 if (NumDefs == 1) { 8190 assert(V.getNode() && "The single defined operand is empty!"); 8191 SmallVector<SDValue, 8> Opnds; 8192 for (unsigned i = 0, e = VTs.size(); i != e; ++i) { 8193 if (i != Idx) { 8194 Opnds.push_back(DAG.getUNDEF(VTs[i])); 8195 continue; 8196 } 8197 SDValue NV = DAG.getNode(ISD::TRUNCATE, SDLoc(V), VTs[i], V); 8198 AddToWorklist(NV.getNode()); 8199 Opnds.push_back(NV); 8200 } 8201 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Opnds); 8202 } 8203 } 8204 8205 // Fold truncate of a bitcast of a vector to an extract of the low vector 8206 // element. 8207 // 8208 // e.g. 
trunc (i64 (bitcast v2i32:x)) -> extract_vector_elt v2i32:x, 0 8209 if (N0.getOpcode() == ISD::BITCAST && !VT.isVector()) { 8210 SDValue VecSrc = N0.getOperand(0); 8211 EVT SrcVT = VecSrc.getValueType(); 8212 if (SrcVT.isVector() && SrcVT.getScalarType() == VT && 8213 (!LegalOperations || 8214 TLI.isOperationLegal(ISD::EXTRACT_VECTOR_ELT, SrcVT))) { 8215 SDLoc SL(N); 8216 8217 EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout()); 8218 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, VT, 8219 VecSrc, DAG.getConstant(0, SL, IdxVT)); 8220 } 8221 } 8222 8223 // Simplify the operands using demanded-bits information. 8224 if (!VT.isVector() && 8225 SimplifyDemandedBits(SDValue(N, 0))) 8226 return SDValue(N, 0); 8227 8228 // (trunc adde(X, Y, Carry)) -> (adde trunc(X), trunc(Y), Carry) 8229 // When the adde's carry is not used. 8230 if (N0.getOpcode() == ISD::ADDE && N0.hasOneUse() && 8231 !N0.getNode()->hasAnyUseOfValue(1) && 8232 (!LegalOperations || TLI.isOperationLegal(ISD::ADDE, VT))) { 8233 SDLoc SL(N); 8234 auto X = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0)); 8235 auto Y = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1)); 8236 return DAG.getNode(ISD::ADDE, SL, DAG.getVTList(VT, MVT::Glue), 8237 X, Y, N0.getOperand(2)); 8238 } 8239 8240 return SDValue(); 8241 } 8242 8243 static SDNode *getBuildPairElt(SDNode *N, unsigned i) { 8244 SDValue Elt = N->getOperand(i); 8245 if (Elt.getOpcode() != ISD::MERGE_VALUES) 8246 return Elt.getNode(); 8247 return Elt.getOperand(Elt.getResNo()).getNode(); 8248 } 8249 8250 /// build_pair (load, load) -> load 8251 /// if load locations are consecutive. 8252 SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) { 8253 assert(N->getOpcode() == ISD::BUILD_PAIR); 8254 8255 LoadSDNode *LD1 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 0)); 8256 LoadSDNode *LD2 = dyn_cast<LoadSDNode>(getBuildPairElt(N, 1)); 8257 if (!LD1 || !LD2 || !ISD::isNON_EXTLoad(LD1) || !LD1->hasOneUse() || 8258 LD1->getAddressSpace() != LD2->getAddressSpace()) 8259 return SDValue(); 8260 EVT LD1VT = LD1->getValueType(0); 8261 unsigned LD1Bytes = LD1VT.getSizeInBits() / 8; 8262 if (ISD::isNON_EXTLoad(LD2) && LD2->hasOneUse() && 8263 DAG.areNonVolatileConsecutiveLoads(LD2, LD1, LD1Bytes, 1)) { 8264 unsigned Align = LD1->getAlignment(); 8265 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment( 8266 VT.getTypeForEVT(*DAG.getContext())); 8267 8268 if (NewAlign <= Align && 8269 (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) 8270 return DAG.getLoad(VT, SDLoc(N), LD1->getChain(), LD1->getBasePtr(), 8271 LD1->getPointerInfo(), Align); 8272 } 8273 8274 return SDValue(); 8275 } 8276 8277 static unsigned getPPCf128HiElementSelector(const SelectionDAG &DAG) { 8278 // On little-endian machines, bitcasting from ppcf128 to i128 does swap the Hi 8279 // and Lo parts; on big-endian machines it doesn't. 8280 return DAG.getDataLayout().isBigEndian() ? 1 : 0; 8281 } 8282 8283 static SDValue foldBitcastedFPLogic(SDNode *N, SelectionDAG &DAG, 8284 const TargetLowering &TLI) { 8285 // If this is not a bitcast to an FP type or if the target doesn't have 8286 // IEEE754-compliant FP logic, we're done. 8287 EVT VT = N->getValueType(0); 8288 if (!VT.isFloatingPoint() || !TLI.hasBitPreservingFPLogic(VT)) 8289 return SDValue(); 8290 8291 // TODO: Use splat values for the constant-checking below and remove this 8292 // restriction. 
8293 SDValue N0 = N->getOperand(0);
8294 EVT SourceVT = N0.getValueType();
8295 if (SourceVT.isVector())
8296 return SDValue();
8297
8298 unsigned FPOpcode;
8299 APInt SignMask;
8300 switch (N0.getOpcode()) {
8301 case ISD::AND:
8302 FPOpcode = ISD::FABS;
8303 SignMask = ~APInt::getSignBit(SourceVT.getSizeInBits());
8304 break;
8305 case ISD::XOR:
8306 FPOpcode = ISD::FNEG;
8307 SignMask = APInt::getSignBit(SourceVT.getSizeInBits());
8308 break;
8309 // TODO: ISD::OR --> ISD::FNABS?
8310 default:
8311 return SDValue();
8312 }
8313
8314 // Fold (bitcast int (and (bitcast fp X to int), 0x7fff...) to fp) -> fabs X
8315 // Fold (bitcast int (xor (bitcast fp X to int), 0x8000...) to fp) -> fneg X
8316 SDValue LogicOp0 = N0.getOperand(0);
8317 ConstantSDNode *LogicOp1 = dyn_cast<ConstantSDNode>(N0.getOperand(1));
8318 if (LogicOp1 && LogicOp1->getAPIntValue() == SignMask &&
8319 LogicOp0.getOpcode() == ISD::BITCAST &&
8320 LogicOp0->getOperand(0).getValueType() == VT)
8321 return DAG.getNode(FPOpcode, SDLoc(N), VT, LogicOp0->getOperand(0));
8322
8323 return SDValue();
8324 }
8325
8326 SDValue DAGCombiner::visitBITCAST(SDNode *N) {
8327 SDValue N0 = N->getOperand(0);
8328 EVT VT = N->getValueType(0);
8329
8330 if (N0.isUndef())
8331 return DAG.getUNDEF(VT);
8332
8333 // If the input is a BUILD_VECTOR with all constant elements, fold this now.
8334 // Only do this before legalize, since afterward the target may be depending
8335 // on the bitconvert.
8336 // First check to see if this is all constant.
8337 if (!LegalTypes &&
8338 N0.getOpcode() == ISD::BUILD_VECTOR && N0.getNode()->hasOneUse() &&
8339 VT.isVector()) {
8340 bool isSimple = cast<BuildVectorSDNode>(N0)->isConstant();
8341
8342 EVT DestEltVT = N->getValueType(0).getVectorElementType();
8343 assert(!DestEltVT.isVector() &&
8344 "Element type of vector ValueType must not be vector!");
8345 if (isSimple)
8346 return ConstantFoldBITCASTofBUILD_VECTOR(N0.getNode(), DestEltVT);
8347 }
8348
8349 // If the input is a constant, let getNode fold it.
8350 if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
8351 // If we can't allow illegal operations, we need to check that this is just
8352 // an fp -> int or int -> fp conversion and that the resulting operation
8353 // will be legal.
8354 if (!LegalOperations ||
8355 (isa<ConstantSDNode>(N0) && VT.isFloatingPoint() && !VT.isVector() &&
8356 TLI.isOperationLegal(ISD::ConstantFP, VT)) ||
8357 (isa<ConstantFPSDNode>(N0) && VT.isInteger() && !VT.isVector() &&
8358 TLI.isOperationLegal(ISD::Constant, VT)))
8359 return DAG.getBitcast(VT, N0);
8360 }
8361
8362 // (conv (conv x, t1), t2) -> (conv x, t2)
8363 if (N0.getOpcode() == ISD::BITCAST)
8364 return DAG.getBitcast(VT, N0.getOperand(0));
8365
8366 // fold (conv (load x)) -> (load (conv*)x)
8367 // If the resultant load doesn't need a higher alignment than the original.
8368 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
8369 // Do not change the width of a volatile load.
8370 !cast<LoadSDNode>(N0)->isVolatile() &&
8371 // Do not remove the cast if the types differ in endian layout.
8372 TLI.hasBigEndianPartOrdering(N0.getValueType(), DAG.getDataLayout()) == 8373 TLI.hasBigEndianPartOrdering(VT, DAG.getDataLayout()) && 8374 (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT)) && 8375 TLI.isLoadBitCastBeneficial(N0.getValueType(), VT)) { 8376 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 8377 unsigned OrigAlign = LN0->getAlignment(); 8378 8379 bool Fast = false; 8380 if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT, 8381 LN0->getAddressSpace(), OrigAlign, &Fast) && 8382 Fast) { 8383 SDValue Load = 8384 DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(), 8385 LN0->getPointerInfo(), OrigAlign, 8386 LN0->getMemOperand()->getFlags(), LN0->getAAInfo()); 8387 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1)); 8388 return Load; 8389 } 8390 } 8391 8392 if (SDValue V = foldBitcastedFPLogic(N, DAG, TLI)) 8393 return V; 8394 8395 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit) 8396 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit)) 8397 // 8398 // For ppc_fp128: 8399 // fold (bitcast (fneg x)) -> 8400 // flipbit = signbit 8401 // (xor (bitcast x) (build_pair flipbit, flipbit)) 8402 // 8403 // fold (bitcast (fabs x)) -> 8404 // flipbit = (and (extract_element (bitcast x), 0), signbit) 8405 // (xor (bitcast x) (build_pair flipbit, flipbit)) 8406 // This often reduces constant pool loads. 8407 if (((N0.getOpcode() == ISD::FNEG && !TLI.isFNegFree(N0.getValueType())) || 8408 (N0.getOpcode() == ISD::FABS && !TLI.isFAbsFree(N0.getValueType()))) && 8409 N0.getNode()->hasOneUse() && VT.isInteger() && 8410 !VT.isVector() && !N0.getValueType().isVector()) { 8411 SDValue NewConv = DAG.getBitcast(VT, N0.getOperand(0)); 8412 AddToWorklist(NewConv.getNode()); 8413 8414 SDLoc DL(N); 8415 if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) { 8416 assert(VT.getSizeInBits() == 128); 8417 SDValue SignBit = DAG.getConstant( 8418 APInt::getSignBit(VT.getSizeInBits() / 2), SDLoc(N0), MVT::i64); 8419 SDValue FlipBit; 8420 if (N0.getOpcode() == ISD::FNEG) { 8421 FlipBit = SignBit; 8422 AddToWorklist(FlipBit.getNode()); 8423 } else { 8424 assert(N0.getOpcode() == ISD::FABS); 8425 SDValue Hi = 8426 DAG.getNode(ISD::EXTRACT_ELEMENT, SDLoc(NewConv), MVT::i64, NewConv, 8427 DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG), 8428 SDLoc(NewConv))); 8429 AddToWorklist(Hi.getNode()); 8430 FlipBit = DAG.getNode(ISD::AND, SDLoc(N0), MVT::i64, Hi, SignBit); 8431 AddToWorklist(FlipBit.getNode()); 8432 } 8433 SDValue FlipBits = 8434 DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit); 8435 AddToWorklist(FlipBits.getNode()); 8436 return DAG.getNode(ISD::XOR, DL, VT, NewConv, FlipBits); 8437 } 8438 APInt SignBit = APInt::getSignBit(VT.getSizeInBits()); 8439 if (N0.getOpcode() == ISD::FNEG) 8440 return DAG.getNode(ISD::XOR, DL, VT, 8441 NewConv, DAG.getConstant(SignBit, DL, VT)); 8442 assert(N0.getOpcode() == ISD::FABS); 8443 return DAG.getNode(ISD::AND, DL, VT, 8444 NewConv, DAG.getConstant(~SignBit, DL, VT)); 8445 } 8446 8447 // fold (bitconvert (fcopysign cst, x)) -> 8448 // (or (and (bitconvert x), sign), (and cst, (not sign))) 8449 // Note that we don't handle (copysign x, cst) because this can always be 8450 // folded to an fneg or fabs. 
8451 // 8452 // For ppc_fp128: 8453 // fold (bitcast (fcopysign cst, x)) -> 8454 // flipbit = (and (extract_element 8455 // (xor (bitcast cst), (bitcast x)), 0), 8456 // signbit) 8457 // (xor (bitcast cst) (build_pair flipbit, flipbit)) 8458 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse() && 8459 isa<ConstantFPSDNode>(N0.getOperand(0)) && 8460 VT.isInteger() && !VT.isVector()) { 8461 unsigned OrigXWidth = N0.getOperand(1).getValueSizeInBits(); 8462 EVT IntXVT = EVT::getIntegerVT(*DAG.getContext(), OrigXWidth); 8463 if (isTypeLegal(IntXVT)) { 8464 SDValue X = DAG.getBitcast(IntXVT, N0.getOperand(1)); 8465 AddToWorklist(X.getNode()); 8466 8467 // If X has a different width than the result/lhs, sext it or truncate it. 8468 unsigned VTWidth = VT.getSizeInBits(); 8469 if (OrigXWidth < VTWidth) { 8470 X = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, X); 8471 AddToWorklist(X.getNode()); 8472 } else if (OrigXWidth > VTWidth) { 8473 // To get the sign bit in the right place, we have to shift it right 8474 // before truncating. 8475 SDLoc DL(X); 8476 X = DAG.getNode(ISD::SRL, DL, 8477 X.getValueType(), X, 8478 DAG.getConstant(OrigXWidth-VTWidth, DL, 8479 X.getValueType())); 8480 AddToWorklist(X.getNode()); 8481 X = DAG.getNode(ISD::TRUNCATE, SDLoc(X), VT, X); 8482 AddToWorklist(X.getNode()); 8483 } 8484 8485 if (N0.getValueType() == MVT::ppcf128 && !LegalTypes) { 8486 APInt SignBit = APInt::getSignBit(VT.getSizeInBits() / 2); 8487 SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0)); 8488 AddToWorklist(Cst.getNode()); 8489 SDValue X = DAG.getBitcast(VT, N0.getOperand(1)); 8490 AddToWorklist(X.getNode()); 8491 SDValue XorResult = DAG.getNode(ISD::XOR, SDLoc(N0), VT, Cst, X); 8492 AddToWorklist(XorResult.getNode()); 8493 SDValue XorResult64 = DAG.getNode( 8494 ISD::EXTRACT_ELEMENT, SDLoc(XorResult), MVT::i64, XorResult, 8495 DAG.getIntPtrConstant(getPPCf128HiElementSelector(DAG), 8496 SDLoc(XorResult))); 8497 AddToWorklist(XorResult64.getNode()); 8498 SDValue FlipBit = 8499 DAG.getNode(ISD::AND, SDLoc(XorResult64), MVT::i64, XorResult64, 8500 DAG.getConstant(SignBit, SDLoc(XorResult64), MVT::i64)); 8501 AddToWorklist(FlipBit.getNode()); 8502 SDValue FlipBits = 8503 DAG.getNode(ISD::BUILD_PAIR, SDLoc(N0), VT, FlipBit, FlipBit); 8504 AddToWorklist(FlipBits.getNode()); 8505 return DAG.getNode(ISD::XOR, SDLoc(N), VT, Cst, FlipBits); 8506 } 8507 APInt SignBit = APInt::getSignBit(VT.getSizeInBits()); 8508 X = DAG.getNode(ISD::AND, SDLoc(X), VT, 8509 X, DAG.getConstant(SignBit, SDLoc(X), VT)); 8510 AddToWorklist(X.getNode()); 8511 8512 SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0)); 8513 Cst = DAG.getNode(ISD::AND, SDLoc(Cst), VT, 8514 Cst, DAG.getConstant(~SignBit, SDLoc(Cst), VT)); 8515 AddToWorklist(Cst.getNode()); 8516 8517 return DAG.getNode(ISD::OR, SDLoc(N), VT, X, Cst); 8518 } 8519 } 8520 8521 // bitconvert(build_pair(ld, ld)) -> ld iff load locations are consecutive. 8522 if (N0.getOpcode() == ISD::BUILD_PAIR) 8523 if (SDValue CombineLD = CombineConsecutiveLoads(N0.getNode(), VT)) 8524 return CombineLD; 8525 8526 // Remove double bitcasts from shuffles - this is often a legacy of 8527 // XformToShuffleWithZero being used to combine bitmaskings (of 8528 // float vectors bitcast to integer vectors) into shuffles. 
8529 // bitcast(shuffle(bitcast(s0),bitcast(s1))) -> shuffle(s0,s1) 8530 if (Level < AfterLegalizeDAG && TLI.isTypeLegal(VT) && VT.isVector() && 8531 N0->getOpcode() == ISD::VECTOR_SHUFFLE && 8532 VT.getVectorNumElements() >= N0.getValueType().getVectorNumElements() && 8533 !(VT.getVectorNumElements() % N0.getValueType().getVectorNumElements())) { 8534 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N0); 8535 8536 // If operands are a bitcast, peek through if it casts the original VT. 8537 // If operands are a constant, just bitcast back to original VT. 8538 auto PeekThroughBitcast = [&](SDValue Op) { 8539 if (Op.getOpcode() == ISD::BITCAST && 8540 Op.getOperand(0).getValueType() == VT) 8541 return SDValue(Op.getOperand(0)); 8542 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) || 8543 ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) 8544 return DAG.getBitcast(VT, Op); 8545 return SDValue(); 8546 }; 8547 8548 SDValue SV0 = PeekThroughBitcast(N0->getOperand(0)); 8549 SDValue SV1 = PeekThroughBitcast(N0->getOperand(1)); 8550 if (!(SV0 && SV1)) 8551 return SDValue(); 8552 8553 int MaskScale = 8554 VT.getVectorNumElements() / N0.getValueType().getVectorNumElements(); 8555 SmallVector<int, 8> NewMask; 8556 for (int M : SVN->getMask()) 8557 for (int i = 0; i != MaskScale; ++i) 8558 NewMask.push_back(M < 0 ? -1 : M * MaskScale + i); 8559 8560 bool LegalMask = TLI.isShuffleMaskLegal(NewMask, VT); 8561 if (!LegalMask) { 8562 std::swap(SV0, SV1); 8563 ShuffleVectorSDNode::commuteMask(NewMask); 8564 LegalMask = TLI.isShuffleMaskLegal(NewMask, VT); 8565 } 8566 8567 if (LegalMask) 8568 return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, NewMask); 8569 } 8570 8571 return SDValue(); 8572 } 8573 8574 SDValue DAGCombiner::visitBUILD_PAIR(SDNode *N) { 8575 EVT VT = N->getValueType(0); 8576 return CombineConsecutiveLoads(N, VT); 8577 } 8578 8579 /// We know that BV is a build_vector node with Constant, ConstantFP or Undef 8580 /// operands. DstEltVT indicates the destination element value type. 8581 SDValue DAGCombiner:: 8582 ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) { 8583 EVT SrcEltVT = BV->getValueType(0).getVectorElementType(); 8584 8585 // If this is already the right type, we're done. 8586 if (SrcEltVT == DstEltVT) return SDValue(BV, 0); 8587 8588 unsigned SrcBitSize = SrcEltVT.getSizeInBits(); 8589 unsigned DstBitSize = DstEltVT.getSizeInBits(); 8590 8591 // If this is a conversion of N elements of one type to N elements of another 8592 // type, convert each element. This handles FP<->INT cases. 8593 if (SrcBitSize == DstBitSize) { 8594 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, 8595 BV->getValueType(0).getVectorNumElements()); 8596 8597 // Due to the FP element handling below calling this routine recursively, 8598 // we can end up with a scalar-to-vector node here. 8599 if (BV->getOpcode() == ISD::SCALAR_TO_VECTOR) 8600 return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(BV), VT, 8601 DAG.getBitcast(DstEltVT, BV->getOperand(0))); 8602 8603 SmallVector<SDValue, 8> Ops; 8604 for (SDValue Op : BV->op_values()) { 8605 // If the vector element type is not legal, the BUILD_VECTOR operands 8606 // are promoted and implicitly truncated. Make that explicit here. 
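// For example, a v8i8 build_vector may carry i32 operands after promotion;
// truncate them back to i8 so each element is bitcast at its true width.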
8607 if (Op.getValueType() != SrcEltVT) 8608 Op = DAG.getNode(ISD::TRUNCATE, SDLoc(BV), SrcEltVT, Op); 8609 Ops.push_back(DAG.getBitcast(DstEltVT, Op)); 8610 AddToWorklist(Ops.back().getNode()); 8611 } 8612 return DAG.getBuildVector(VT, SDLoc(BV), Ops); 8613 } 8614 8615 // Otherwise, we're growing or shrinking the elements. To avoid having to 8616 // handle annoying details of growing/shrinking FP values, we convert them to 8617 // int first. 8618 if (SrcEltVT.isFloatingPoint()) { 8619 // Convert the input float vector to a int vector where the elements are the 8620 // same sizes. 8621 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltVT.getSizeInBits()); 8622 BV = ConstantFoldBITCASTofBUILD_VECTOR(BV, IntVT).getNode(); 8623 SrcEltVT = IntVT; 8624 } 8625 8626 // Now we know the input is an integer vector. If the output is a FP type, 8627 // convert to integer first, then to FP of the right size. 8628 if (DstEltVT.isFloatingPoint()) { 8629 EVT TmpVT = EVT::getIntegerVT(*DAG.getContext(), DstEltVT.getSizeInBits()); 8630 SDNode *Tmp = ConstantFoldBITCASTofBUILD_VECTOR(BV, TmpVT).getNode(); 8631 8632 // Next, convert to FP elements of the same size. 8633 return ConstantFoldBITCASTofBUILD_VECTOR(Tmp, DstEltVT); 8634 } 8635 8636 SDLoc DL(BV); 8637 8638 // Okay, we know the src/dst types are both integers of differing types. 8639 // Handling growing first. 8640 assert(SrcEltVT.isInteger() && DstEltVT.isInteger()); 8641 if (SrcBitSize < DstBitSize) { 8642 unsigned NumInputsPerOutput = DstBitSize/SrcBitSize; 8643 8644 SmallVector<SDValue, 8> Ops; 8645 for (unsigned i = 0, e = BV->getNumOperands(); i != e; 8646 i += NumInputsPerOutput) { 8647 bool isLE = DAG.getDataLayout().isLittleEndian(); 8648 APInt NewBits = APInt(DstBitSize, 0); 8649 bool EltIsUndef = true; 8650 for (unsigned j = 0; j != NumInputsPerOutput; ++j) { 8651 // Shift the previously computed bits over. 8652 NewBits <<= SrcBitSize; 8653 SDValue Op = BV->getOperand(i+ (isLE ? (NumInputsPerOutput-j-1) : j)); 8654 if (Op.isUndef()) continue; 8655 EltIsUndef = false; 8656 8657 NewBits |= cast<ConstantSDNode>(Op)->getAPIntValue(). 8658 zextOrTrunc(SrcBitSize).zext(DstBitSize); 8659 } 8660 8661 if (EltIsUndef) 8662 Ops.push_back(DAG.getUNDEF(DstEltVT)); 8663 else 8664 Ops.push_back(DAG.getConstant(NewBits, DL, DstEltVT)); 8665 } 8666 8667 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, Ops.size()); 8668 return DAG.getBuildVector(VT, DL, Ops); 8669 } 8670 8671 // Finally, this must be the case where we are shrinking elements: each input 8672 // turns into multiple outputs. 8673 unsigned NumOutputsPerInput = SrcBitSize/DstBitSize; 8674 EVT VT = EVT::getVectorVT(*DAG.getContext(), DstEltVT, 8675 NumOutputsPerInput*BV->getNumOperands()); 8676 SmallVector<SDValue, 8> Ops; 8677 8678 for (const SDValue &Op : BV->op_values()) { 8679 if (Op.isUndef()) { 8680 Ops.append(NumOutputsPerInput, DAG.getUNDEF(DstEltVT)); 8681 continue; 8682 } 8683 8684 APInt OpVal = cast<ConstantSDNode>(Op)-> 8685 getAPIntValue().zextOrTrunc(SrcBitSize); 8686 8687 for (unsigned j = 0; j != NumOutputsPerInput; ++j) { 8688 APInt ThisVal = OpVal.trunc(DstBitSize); 8689 Ops.push_back(DAG.getConstant(ThisVal, DL, DstEltVT)); 8690 OpVal = OpVal.lshr(DstBitSize); 8691 } 8692 8693 // For big endian targets, swap the order of the pieces of each element. 
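// For example, splitting the i32 constant 0x11223344 into i8 pieces yields
// 0x44, 0x33, 0x22, 0x11; big-endian element order needs 0x11, 0x22, 0x33, 0x44.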
8694 if (DAG.getDataLayout().isBigEndian()) 8695 std::reverse(Ops.end()-NumOutputsPerInput, Ops.end()); 8696 } 8697 8698 return DAG.getBuildVector(VT, DL, Ops); 8699 } 8700 8701 static bool isContractable(SDNode *N) { 8702 SDNodeFlags F = cast<BinaryWithFlagsSDNode>(N)->Flags; 8703 return F.hasAllowContract() || F.hasUnsafeAlgebra(); 8704 } 8705 8706 /// Try to perform FMA combining on a given FADD node. 8707 SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) { 8708 SDValue N0 = N->getOperand(0); 8709 SDValue N1 = N->getOperand(1); 8710 EVT VT = N->getValueType(0); 8711 SDLoc SL(N); 8712 8713 const TargetOptions &Options = DAG.getTarget().Options; 8714 8715 // Floating-point multiply-add with intermediate rounding. 8716 bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT)); 8717 8718 // Floating-point multiply-add without intermediate rounding. 8719 bool HasFMA = 8720 TLI.isFMAFasterThanFMulAndFAdd(VT) && 8721 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT)); 8722 8723 // No valid opcode, do not combine. 8724 if (!HasFMAD && !HasFMA) 8725 return SDValue(); 8726 8727 bool AllowFusionGlobally = (Options.AllowFPOpFusion == FPOpFusion::Fast || 8728 Options.UnsafeFPMath || HasFMAD); 8729 // If the addition is not contractable, do not combine. 8730 if (!AllowFusionGlobally && !isContractable(N)) 8731 return SDValue(); 8732 8733 const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo(); 8734 if (STI && STI->generateFMAsInMachineCombiner(OptLevel)) 8735 return SDValue(); 8736 8737 // Always prefer FMAD to FMA for precision. 8738 unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA; 8739 bool Aggressive = TLI.enableAggressiveFMAFusion(VT); 8740 bool LookThroughFPExt = TLI.isFPExtFree(VT); 8741 8742 // Is the node an FMUL and contractable either due to global flags or 8743 // SDNodeFlags. 8744 auto isContractableFMUL = [AllowFusionGlobally](SDValue N) { 8745 if (N.getOpcode() != ISD::FMUL) 8746 return false; 8747 return AllowFusionGlobally || isContractable(N.getNode()); 8748 }; 8749 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)), 8750 // prefer to fold the multiply with fewer uses. 8751 if (Aggressive && isContractableFMUL(N0) && isContractableFMUL(N1)) { 8752 if (N0.getNode()->use_size() > N1.getNode()->use_size()) 8753 std::swap(N0, N1); 8754 } 8755 8756 // fold (fadd (fmul x, y), z) -> (fma x, y, z) 8757 if (isContractableFMUL(N0) && (Aggressive || N0->hasOneUse())) { 8758 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8759 N0.getOperand(0), N0.getOperand(1), N1); 8760 } 8761 8762 // fold (fadd x, (fmul y, z)) -> (fma y, z, x) 8763 // Note: Commutes FADD operands. 8764 if (isContractableFMUL(N1) && (Aggressive || N1->hasOneUse())) { 8765 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8766 N1.getOperand(0), N1.getOperand(1), N0); 8767 } 8768 8769 // Look through FP_EXTEND nodes to do more combining. 8770 if (LookThroughFPExt) { 8771 // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z) 8772 if (N0.getOpcode() == ISD::FP_EXTEND) { 8773 SDValue N00 = N0.getOperand(0); 8774 if (isContractableFMUL(N00)) 8775 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8776 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8777 N00.getOperand(0)), 8778 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8779 N00.getOperand(1)), N1); 8780 } 8781 8782 // fold (fadd x, (fpext (fmul y, z))) -> (fma (fpext y), (fpext z), x) 8783 // Note: Commutes FADD operands. 
8784 if (N1.getOpcode() == ISD::FP_EXTEND) { 8785 SDValue N10 = N1.getOperand(0); 8786 if (isContractableFMUL(N10)) 8787 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8788 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8789 N10.getOperand(0)), 8790 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8791 N10.getOperand(1)), N0); 8792 } 8793 } 8794 8795 // More folding opportunities when target permits. 8796 if (Aggressive) { 8797 // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y (fma u, v, z)) 8798 // FIXME: The UnsafeAlgebra flag should be propagated to FMA/FMAD, but FMF 8799 // are currently only supported on binary nodes. 8800 if (Options.UnsafeFPMath && 8801 N0.getOpcode() == PreferredFusedOpcode && 8802 N0.getOperand(2).getOpcode() == ISD::FMUL && 8803 N0->hasOneUse() && N0.getOperand(2)->hasOneUse()) { 8804 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8805 N0.getOperand(0), N0.getOperand(1), 8806 DAG.getNode(PreferredFusedOpcode, SL, VT, 8807 N0.getOperand(2).getOperand(0), 8808 N0.getOperand(2).getOperand(1), 8809 N1)); 8810 } 8811 8812 // fold (fadd x, (fma y, z, (fmul u, v)) -> (fma y, z (fma u, v, x)) 8813 // FIXME: The UnsafeAlgebra flag should be propagated to FMA/FMAD, but FMF 8814 // are currently only supported on binary nodes. 8815 if (Options.UnsafeFPMath && 8816 N1->getOpcode() == PreferredFusedOpcode && 8817 N1.getOperand(2).getOpcode() == ISD::FMUL && 8818 N1->hasOneUse() && N1.getOperand(2)->hasOneUse()) { 8819 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8820 N1.getOperand(0), N1.getOperand(1), 8821 DAG.getNode(PreferredFusedOpcode, SL, VT, 8822 N1.getOperand(2).getOperand(0), 8823 N1.getOperand(2).getOperand(1), 8824 N0)); 8825 } 8826 8827 if (LookThroughFPExt) { 8828 // fold (fadd (fma x, y, (fpext (fmul u, v))), z) 8829 // -> (fma x, y, (fma (fpext u), (fpext v), z)) 8830 auto FoldFAddFMAFPExtFMul = [&] ( 8831 SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z) { 8832 return DAG.getNode(PreferredFusedOpcode, SL, VT, X, Y, 8833 DAG.getNode(PreferredFusedOpcode, SL, VT, 8834 DAG.getNode(ISD::FP_EXTEND, SL, VT, U), 8835 DAG.getNode(ISD::FP_EXTEND, SL, VT, V), 8836 Z)); 8837 }; 8838 if (N0.getOpcode() == PreferredFusedOpcode) { 8839 SDValue N02 = N0.getOperand(2); 8840 if (N02.getOpcode() == ISD::FP_EXTEND) { 8841 SDValue N020 = N02.getOperand(0); 8842 if (isContractableFMUL(N020)) 8843 return FoldFAddFMAFPExtFMul(N0.getOperand(0), N0.getOperand(1), 8844 N020.getOperand(0), N020.getOperand(1), 8845 N1); 8846 } 8847 } 8848 8849 // fold (fadd (fpext (fma x, y, (fmul u, v))), z) 8850 // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z)) 8851 // FIXME: This turns two single-precision and one double-precision 8852 // operation into two double-precision operations, which might not be 8853 // interesting for all targets, especially GPUs. 
8854 auto FoldFAddFPExtFMAFMul = [&] ( 8855 SDValue X, SDValue Y, SDValue U, SDValue V, SDValue Z) { 8856 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8857 DAG.getNode(ISD::FP_EXTEND, SL, VT, X), 8858 DAG.getNode(ISD::FP_EXTEND, SL, VT, Y), 8859 DAG.getNode(PreferredFusedOpcode, SL, VT, 8860 DAG.getNode(ISD::FP_EXTEND, SL, VT, U), 8861 DAG.getNode(ISD::FP_EXTEND, SL, VT, V), 8862 Z)); 8863 }; 8864 if (N0.getOpcode() == ISD::FP_EXTEND) { 8865 SDValue N00 = N0.getOperand(0); 8866 if (N00.getOpcode() == PreferredFusedOpcode) { 8867 SDValue N002 = N00.getOperand(2); 8868 if (isContractableFMUL(N002)) 8869 return FoldFAddFPExtFMAFMul(N00.getOperand(0), N00.getOperand(1), 8870 N002.getOperand(0), N002.getOperand(1), 8871 N1); 8872 } 8873 } 8874 8875 // fold (fadd x, (fma y, z, (fpext (fmul u, v))) 8876 // -> (fma y, z, (fma (fpext u), (fpext v), x)) 8877 if (N1.getOpcode() == PreferredFusedOpcode) { 8878 SDValue N12 = N1.getOperand(2); 8879 if (N12.getOpcode() == ISD::FP_EXTEND) { 8880 SDValue N120 = N12.getOperand(0); 8881 if (isContractableFMUL(N120)) 8882 return FoldFAddFMAFPExtFMul(N1.getOperand(0), N1.getOperand(1), 8883 N120.getOperand(0), N120.getOperand(1), 8884 N0); 8885 } 8886 } 8887 8888 // fold (fadd x, (fpext (fma y, z, (fmul u, v))) 8889 // -> (fma (fpext y), (fpext z), (fma (fpext u), (fpext v), x)) 8890 // FIXME: This turns two single-precision and one double-precision 8891 // operation into two double-precision operations, which might not be 8892 // interesting for all targets, especially GPUs. 8893 if (N1.getOpcode() == ISD::FP_EXTEND) { 8894 SDValue N10 = N1.getOperand(0); 8895 if (N10.getOpcode() == PreferredFusedOpcode) { 8896 SDValue N102 = N10.getOperand(2); 8897 if (isContractableFMUL(N102)) 8898 return FoldFAddFPExtFMAFMul(N10.getOperand(0), N10.getOperand(1), 8899 N102.getOperand(0), N102.getOperand(1), 8900 N0); 8901 } 8902 } 8903 } 8904 } 8905 8906 return SDValue(); 8907 } 8908 8909 /// Try to perform FMA combining on a given FSUB node. 8910 SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) { 8911 SDValue N0 = N->getOperand(0); 8912 SDValue N1 = N->getOperand(1); 8913 EVT VT = N->getValueType(0); 8914 SDLoc SL(N); 8915 8916 const TargetOptions &Options = DAG.getTarget().Options; 8917 // Floating-point multiply-add with intermediate rounding. 8918 bool HasFMAD = (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT)); 8919 8920 // Floating-point multiply-add without intermediate rounding. 8921 bool HasFMA = 8922 TLI.isFMAFasterThanFMulAndFAdd(VT) && 8923 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT)); 8924 8925 // No valid opcode, do not combine. 8926 if (!HasFMAD && !HasFMA) 8927 return SDValue(); 8928 8929 bool AllowFusionGlobally = (Options.AllowFPOpFusion == FPOpFusion::Fast || 8930 Options.UnsafeFPMath || HasFMAD); 8931 // If the subtraction is not contractable, do not combine. 8932 if (!AllowFusionGlobally && !isContractable(N)) 8933 return SDValue(); 8934 8935 const SelectionDAGTargetInfo *STI = DAG.getSubtarget().getSelectionDAGInfo(); 8936 if (STI && STI->generateFMAsInMachineCombiner(OptLevel)) 8937 return SDValue(); 8938 8939 // Always prefer FMAD to FMA for precision. 8940 unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA; 8941 bool Aggressive = TLI.enableAggressiveFMAFusion(VT); 8942 bool LookThroughFPExt = TLI.isFPExtFree(VT); 8943 8944 // Is the node an FMUL and contractable either due to global flags or 8945 // SDNodeFlags. 
8946 auto isContractableFMUL = [AllowFusionGlobally](SDValue N) { 8947 if (N.getOpcode() != ISD::FMUL) 8948 return false; 8949 return AllowFusionGlobally || isContractable(N.getNode()); 8950 }; 8951 8952 // fold (fsub (fmul x, y), z) -> (fma x, y, (fneg z)) 8953 if (isContractableFMUL(N0) && (Aggressive || N0->hasOneUse())) { 8954 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8955 N0.getOperand(0), N0.getOperand(1), 8956 DAG.getNode(ISD::FNEG, SL, VT, N1)); 8957 } 8958 8959 // fold (fsub x, (fmul y, z)) -> (fma (fneg y), z, x) 8960 // Note: Commutes FSUB operands. 8961 if (isContractableFMUL(N1) && (Aggressive || N1->hasOneUse())) 8962 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8963 DAG.getNode(ISD::FNEG, SL, VT, 8964 N1.getOperand(0)), 8965 N1.getOperand(1), N0); 8966 8967 // fold (fsub (fneg (fmul, x, y)), z) -> (fma (fneg x), y, (fneg z)) 8968 if (N0.getOpcode() == ISD::FNEG && isContractableFMUL(N0.getOperand(0)) && 8969 (Aggressive || (N0->hasOneUse() && N0.getOperand(0).hasOneUse()))) { 8970 SDValue N00 = N0.getOperand(0).getOperand(0); 8971 SDValue N01 = N0.getOperand(0).getOperand(1); 8972 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8973 DAG.getNode(ISD::FNEG, SL, VT, N00), N01, 8974 DAG.getNode(ISD::FNEG, SL, VT, N1)); 8975 } 8976 8977 // Look through FP_EXTEND nodes to do more combining. 8978 if (LookThroughFPExt) { 8979 // fold (fsub (fpext (fmul x, y)), z) 8980 // -> (fma (fpext x), (fpext y), (fneg z)) 8981 if (N0.getOpcode() == ISD::FP_EXTEND) { 8982 SDValue N00 = N0.getOperand(0); 8983 if (isContractableFMUL(N00)) 8984 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8985 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8986 N00.getOperand(0)), 8987 DAG.getNode(ISD::FP_EXTEND, SL, VT, 8988 N00.getOperand(1)), 8989 DAG.getNode(ISD::FNEG, SL, VT, N1)); 8990 } 8991 8992 // fold (fsub x, (fpext (fmul y, z))) 8993 // -> (fma (fneg (fpext y)), (fpext z), x) 8994 // Note: Commutes FSUB operands. 8995 if (N1.getOpcode() == ISD::FP_EXTEND) { 8996 SDValue N10 = N1.getOperand(0); 8997 if (isContractableFMUL(N10)) 8998 return DAG.getNode(PreferredFusedOpcode, SL, VT, 8999 DAG.getNode(ISD::FNEG, SL, VT, 9000 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9001 N10.getOperand(0))), 9002 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9003 N10.getOperand(1)), 9004 N0); 9005 } 9006 9007 // fold (fsub (fpext (fneg (fmul, x, y))), z) 9008 // -> (fneg (fma (fpext x), (fpext y), z)) 9009 // Note: This could be removed with appropriate canonicalization of the 9010 // input expression into (fneg (fadd (fpext (fmul, x, y)), z). However, the 9011 // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent 9012 // from implementing the canonicalization in visitFSUB. 9013 if (N0.getOpcode() == ISD::FP_EXTEND) { 9014 SDValue N00 = N0.getOperand(0); 9015 if (N00.getOpcode() == ISD::FNEG) { 9016 SDValue N000 = N00.getOperand(0); 9017 if (isContractableFMUL(N000)) { 9018 return DAG.getNode(ISD::FNEG, SL, VT, 9019 DAG.getNode(PreferredFusedOpcode, SL, VT, 9020 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9021 N000.getOperand(0)), 9022 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9023 N000.getOperand(1)), 9024 N1)); 9025 } 9026 } 9027 } 9028 9029 // fold (fsub (fneg (fpext (fmul, x, y))), z) 9030 // -> (fneg (fma (fpext x)), (fpext y), z) 9031 // Note: This could be removed with appropriate canonicalization of the 9032 // input expression into (fneg (fadd (fpext (fmul, x, y)), z). However, the 9033 // orthogonal flags -fp-contract=fast and -enable-unsafe-fp-math prevent 9034 // from implementing the canonicalization in visitFSUB. 
9035 if (N0.getOpcode() == ISD::FNEG) { 9036 SDValue N00 = N0.getOperand(0); 9037 if (N00.getOpcode() == ISD::FP_EXTEND) { 9038 SDValue N000 = N00.getOperand(0); 9039 if (isContractableFMUL(N000)) { 9040 return DAG.getNode(ISD::FNEG, SL, VT, 9041 DAG.getNode(PreferredFusedOpcode, SL, VT, 9042 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9043 N000.getOperand(0)), 9044 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9045 N000.getOperand(1)), 9046 N1)); 9047 } 9048 } 9049 } 9050 9051 } 9052 9053 // More folding opportunities when target permits. 9054 if (Aggressive) { 9055 // fold (fsub (fma x, y, (fmul u, v)), z) 9056 // -> (fma x, y (fma u, v, (fneg z))) 9057 // FIXME: The UnsafeAlgebra flag should be propagated to FMA/FMAD, but FMF 9058 // are currently only supported on binary nodes. 9059 if (Options.UnsafeFPMath && N0.getOpcode() == PreferredFusedOpcode && 9060 isContractableFMUL(N0.getOperand(2)) && N0->hasOneUse() && 9061 N0.getOperand(2)->hasOneUse()) { 9062 return DAG.getNode(PreferredFusedOpcode, SL, VT, 9063 N0.getOperand(0), N0.getOperand(1), 9064 DAG.getNode(PreferredFusedOpcode, SL, VT, 9065 N0.getOperand(2).getOperand(0), 9066 N0.getOperand(2).getOperand(1), 9067 DAG.getNode(ISD::FNEG, SL, VT, 9068 N1))); 9069 } 9070 9071 // fold (fsub x, (fma y, z, (fmul u, v))) 9072 // -> (fma (fneg y), z, (fma (fneg u), v, x)) 9073 // FIXME: The UnsafeAlgebra flag should be propagated to FMA/FMAD, but FMF 9074 // are currently only supported on binary nodes. 9075 if (Options.UnsafeFPMath && N1.getOpcode() == PreferredFusedOpcode && 9076 isContractableFMUL(N1.getOperand(2))) { 9077 SDValue N20 = N1.getOperand(2).getOperand(0); 9078 SDValue N21 = N1.getOperand(2).getOperand(1); 9079 return DAG.getNode(PreferredFusedOpcode, SL, VT, 9080 DAG.getNode(ISD::FNEG, SL, VT, 9081 N1.getOperand(0)), 9082 N1.getOperand(1), 9083 DAG.getNode(PreferredFusedOpcode, SL, VT, 9084 DAG.getNode(ISD::FNEG, SL, VT, N20), 9085 9086 N21, N0)); 9087 } 9088 9089 if (LookThroughFPExt) { 9090 // fold (fsub (fma x, y, (fpext (fmul u, v))), z) 9091 // -> (fma x, y (fma (fpext u), (fpext v), (fneg z))) 9092 if (N0.getOpcode() == PreferredFusedOpcode) { 9093 SDValue N02 = N0.getOperand(2); 9094 if (N02.getOpcode() == ISD::FP_EXTEND) { 9095 SDValue N020 = N02.getOperand(0); 9096 if (isContractableFMUL(N020)) 9097 return DAG.getNode(PreferredFusedOpcode, SL, VT, 9098 N0.getOperand(0), N0.getOperand(1), 9099 DAG.getNode(PreferredFusedOpcode, SL, VT, 9100 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9101 N020.getOperand(0)), 9102 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9103 N020.getOperand(1)), 9104 DAG.getNode(ISD::FNEG, SL, VT, 9105 N1))); 9106 } 9107 } 9108 9109 // fold (fsub (fpext (fma x, y, (fmul u, v))), z) 9110 // -> (fma (fpext x), (fpext y), 9111 // (fma (fpext u), (fpext v), (fneg z))) 9112 // FIXME: This turns two single-precision and one double-precision 9113 // operation into two double-precision operations, which might not be 9114 // interesting for all targets, especially GPUs. 
9115 if (N0.getOpcode() == ISD::FP_EXTEND) { 9116 SDValue N00 = N0.getOperand(0); 9117 if (N00.getOpcode() == PreferredFusedOpcode) { 9118 SDValue N002 = N00.getOperand(2); 9119 if (isContractableFMUL(N002)) 9120 return DAG.getNode(PreferredFusedOpcode, SL, VT, 9121 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9122 N00.getOperand(0)), 9123 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9124 N00.getOperand(1)), 9125 DAG.getNode(PreferredFusedOpcode, SL, VT, 9126 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9127 N002.getOperand(0)), 9128 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9129 N002.getOperand(1)), 9130 DAG.getNode(ISD::FNEG, SL, VT, 9131 N1))); 9132 } 9133 } 9134 9135 // fold (fsub x, (fma y, z, (fpext (fmul u, v)))) 9136 // -> (fma (fneg y), z, (fma (fneg (fpext u)), (fpext v), x)) 9137 if (N1.getOpcode() == PreferredFusedOpcode && 9138 N1.getOperand(2).getOpcode() == ISD::FP_EXTEND) { 9139 SDValue N120 = N1.getOperand(2).getOperand(0); 9140 if (isContractableFMUL(N120)) { 9141 SDValue N1200 = N120.getOperand(0); 9142 SDValue N1201 = N120.getOperand(1); 9143 return DAG.getNode(PreferredFusedOpcode, SL, VT, 9144 DAG.getNode(ISD::FNEG, SL, VT, N1.getOperand(0)), 9145 N1.getOperand(1), 9146 DAG.getNode(PreferredFusedOpcode, SL, VT, 9147 DAG.getNode(ISD::FNEG, SL, VT, 9148 DAG.getNode(ISD::FP_EXTEND, SL, 9149 VT, N1200)), 9150 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9151 N1201), 9152 N0)); 9153 } 9154 } 9155 9156 // fold (fsub x, (fpext (fma y, z, (fmul u, v)))) 9157 // -> (fma (fneg (fpext y)), (fpext z), 9158 // (fma (fneg (fpext u)), (fpext v), x)) 9159 // FIXME: This turns two single-precision and one double-precision 9160 // operation into two double-precision operations, which might not be 9161 // interesting for all targets, especially GPUs. 9162 if (N1.getOpcode() == ISD::FP_EXTEND && 9163 N1.getOperand(0).getOpcode() == PreferredFusedOpcode) { 9164 SDValue N100 = N1.getOperand(0).getOperand(0); 9165 SDValue N101 = N1.getOperand(0).getOperand(1); 9166 SDValue N102 = N1.getOperand(0).getOperand(2); 9167 if (isContractableFMUL(N102)) { 9168 SDValue N1020 = N102.getOperand(0); 9169 SDValue N1021 = N102.getOperand(1); 9170 return DAG.getNode(PreferredFusedOpcode, SL, VT, 9171 DAG.getNode(ISD::FNEG, SL, VT, 9172 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9173 N100)), 9174 DAG.getNode(ISD::FP_EXTEND, SL, VT, N101), 9175 DAG.getNode(PreferredFusedOpcode, SL, VT, 9176 DAG.getNode(ISD::FNEG, SL, VT, 9177 DAG.getNode(ISD::FP_EXTEND, SL, 9178 VT, N1020)), 9179 DAG.getNode(ISD::FP_EXTEND, SL, VT, 9180 N1021), 9181 N0)); 9182 } 9183 } 9184 } 9185 } 9186 9187 return SDValue(); 9188 } 9189 9190 /// Try to perform FMA combining on a given FMUL node based on the distributive 9191 /// law x * (y + 1) = x * y + x and variants thereof (commuted versions, 9192 /// subtraction instead of addition). 9193 SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) { 9194 SDValue N0 = N->getOperand(0); 9195 SDValue N1 = N->getOperand(1); 9196 EVT VT = N->getValueType(0); 9197 SDLoc SL(N); 9198 9199 assert(N->getOpcode() == ISD::FMUL && "Expected FMUL Operation"); 9200 9201 const TargetOptions &Options = DAG.getTarget().Options; 9202 9203 // The transforms below are incorrect when x == 0 and y == inf, because the 9204 // intermediate multiplication produces a nan. 9205 if (!Options.NoInfsFPMath) 9206 return SDValue(); 9207 9208 // Floating-point multiply-add without intermediate rounding. 
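// Roughly: an ISD::FMA node may be used when fusion is allowed (fast contraction
// or unsafe math), the target reports FMA as faster than separate FMUL and FADD,
// and FMA is legal or custom for this type (or we are still before legalization).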
9209 bool HasFMA = 9210 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath) && 9211 TLI.isFMAFasterThanFMulAndFAdd(VT) && 9212 (!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FMA, VT)); 9213 9214 // Floating-point multiply-add with intermediate rounding. This can result 9215 // in a less precise result due to the changed rounding order. 9216 bool HasFMAD = Options.UnsafeFPMath && 9217 (LegalOperations && TLI.isOperationLegal(ISD::FMAD, VT)); 9218 9219 // No valid opcode, do not combine. 9220 if (!HasFMAD && !HasFMA) 9221 return SDValue(); 9222 9223 // Always prefer FMAD to FMA for precision. 9224 unsigned PreferredFusedOpcode = HasFMAD ? ISD::FMAD : ISD::FMA; 9225 bool Aggressive = TLI.enableAggressiveFMAFusion(VT); 9226 9227 // fold (fmul (fadd x, +1.0), y) -> (fma x, y, y) 9228 // fold (fmul (fadd x, -1.0), y) -> (fma x, y, (fneg y)) 9229 auto FuseFADD = [&](SDValue X, SDValue Y) { 9230 if (X.getOpcode() == ISD::FADD && (Aggressive || X->hasOneUse())) { 9231 auto XC1 = isConstOrConstSplatFP(X.getOperand(1)); 9232 if (XC1 && XC1->isExactlyValue(+1.0)) 9233 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, Y); 9234 if (XC1 && XC1->isExactlyValue(-1.0)) 9235 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, 9236 DAG.getNode(ISD::FNEG, SL, VT, Y)); 9237 } 9238 return SDValue(); 9239 }; 9240 9241 if (SDValue FMA = FuseFADD(N0, N1)) 9242 return FMA; 9243 if (SDValue FMA = FuseFADD(N1, N0)) 9244 return FMA; 9245 9246 // fold (fmul (fsub +1.0, x), y) -> (fma (fneg x), y, y) 9247 // fold (fmul (fsub -1.0, x), y) -> (fma (fneg x), y, (fneg y)) 9248 // fold (fmul (fsub x, +1.0), y) -> (fma x, y, (fneg y)) 9249 // fold (fmul (fsub x, -1.0), y) -> (fma x, y, y) 9250 auto FuseFSUB = [&](SDValue X, SDValue Y) { 9251 if (X.getOpcode() == ISD::FSUB && (Aggressive || X->hasOneUse())) { 9252 auto XC0 = isConstOrConstSplatFP(X.getOperand(0)); 9253 if (XC0 && XC0->isExactlyValue(+1.0)) 9254 return DAG.getNode(PreferredFusedOpcode, SL, VT, 9255 DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y, 9256 Y); 9257 if (XC0 && XC0->isExactlyValue(-1.0)) 9258 return DAG.getNode(PreferredFusedOpcode, SL, VT, 9259 DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y, 9260 DAG.getNode(ISD::FNEG, SL, VT, Y)); 9261 9262 auto XC1 = isConstOrConstSplatFP(X.getOperand(1)); 9263 if (XC1 && XC1->isExactlyValue(+1.0)) 9264 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, 9265 DAG.getNode(ISD::FNEG, SL, VT, Y)); 9266 if (XC1 && XC1->isExactlyValue(-1.0)) 9267 return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y, Y); 9268 } 9269 return SDValue(); 9270 }; 9271 9272 if (SDValue FMA = FuseFSUB(N0, N1)) 9273 return FMA; 9274 if (SDValue FMA = FuseFSUB(N1, N0)) 9275 return FMA; 9276 9277 return SDValue(); 9278 } 9279 9280 SDValue DAGCombiner::visitFADD(SDNode *N) { 9281 SDValue N0 = N->getOperand(0); 9282 SDValue N1 = N->getOperand(1); 9283 bool N0CFP = isConstantFPBuildVectorOrConstantFP(N0); 9284 bool N1CFP = isConstantFPBuildVectorOrConstantFP(N1); 9285 EVT VT = N->getValueType(0); 9286 SDLoc DL(N); 9287 const TargetOptions &Options = DAG.getTarget().Options; 9288 const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags; 9289 9290 // fold vector ops 9291 if (VT.isVector()) 9292 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 9293 return FoldedVOp; 9294 9295 // fold (fadd c1, c2) -> c1 + c2 9296 if (N0CFP && N1CFP) 9297 return DAG.getNode(ISD::FADD, DL, VT, N0, N1, Flags); 9298 9299 // canonicalize constant to RHS 9300 if (N0CFP && 
!N1CFP) 9301 return DAG.getNode(ISD::FADD, DL, VT, N1, N0, Flags); 9302 9303 if (SDValue NewSel = foldBinOpIntoSelect(N)) 9304 return NewSel; 9305 9306 // fold (fadd A, (fneg B)) -> (fsub A, B) 9307 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) && 9308 isNegatibleForFree(N1, LegalOperations, TLI, &Options) == 2) 9309 return DAG.getNode(ISD::FSUB, DL, VT, N0, 9310 GetNegatedExpression(N1, DAG, LegalOperations), Flags); 9311 9312 // fold (fadd (fneg A), B) -> (fsub B, A) 9313 if ((!LegalOperations || TLI.isOperationLegalOrCustom(ISD::FSUB, VT)) && 9314 isNegatibleForFree(N0, LegalOperations, TLI, &Options) == 2) 9315 return DAG.getNode(ISD::FSUB, DL, VT, N1, 9316 GetNegatedExpression(N0, DAG, LegalOperations), Flags); 9317 9318 // FIXME: Auto-upgrade the target/function-level option. 9319 if (Options.NoSignedZerosFPMath || N->getFlags()->hasNoSignedZeros()) { 9320 // fold (fadd A, 0) -> A 9321 if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1)) 9322 if (N1C->isZero()) 9323 return N0; 9324 } 9325 9326 // If 'unsafe math' is enabled, fold lots of things. 9327 if (Options.UnsafeFPMath) { 9328 // No FP constant should be created after legalization as Instruction 9329 // Selection pass has a hard time dealing with FP constants. 9330 bool AllowNewConst = (Level < AfterLegalizeDAG); 9331 9332 // fold (fadd (fadd x, c1), c2) -> (fadd x, (fadd c1, c2)) 9333 if (N1CFP && N0.getOpcode() == ISD::FADD && N0.getNode()->hasOneUse() && 9334 isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) 9335 return DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(0), 9336 DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), N1, 9337 Flags), 9338 Flags); 9339 9340 // If allowed, fold (fadd (fneg x), x) -> 0.0 9341 if (AllowNewConst && N0.getOpcode() == ISD::FNEG && N0.getOperand(0) == N1) 9342 return DAG.getConstantFP(0.0, DL, VT); 9343 9344 // If allowed, fold (fadd x, (fneg x)) -> 0.0 9345 if (AllowNewConst && N1.getOpcode() == ISD::FNEG && N1.getOperand(0) == N0) 9346 return DAG.getConstantFP(0.0, DL, VT); 9347 9348 // We can fold chains of FADD's of the same value into multiplications. 9349 // This transform is not safe in general because we are reducing the number 9350 // of rounding steps. 
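// For example, (fadd (fmul x, c), x) becomes (fmul x, c+1.0), and
// (fadd (fadd x, x), (fadd x, x)) becomes (fmul x, 4.0) via the patterns below.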
9351 if (TLI.isOperationLegalOrCustom(ISD::FMUL, VT) && !N0CFP && !N1CFP) { 9352 if (N0.getOpcode() == ISD::FMUL) { 9353 bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0)); 9354 bool CFP01 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(1)); 9355 9356 // (fadd (fmul x, c), x) -> (fmul x, c+1) 9357 if (CFP01 && !CFP00 && N0.getOperand(0) == N1) { 9358 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), 9359 DAG.getConstantFP(1.0, DL, VT), Flags); 9360 return DAG.getNode(ISD::FMUL, DL, VT, N1, NewCFP, Flags); 9361 } 9362 9363 // (fadd (fmul x, c), (fadd x, x)) -> (fmul x, c+2) 9364 if (CFP01 && !CFP00 && N1.getOpcode() == ISD::FADD && 9365 N1.getOperand(0) == N1.getOperand(1) && 9366 N0.getOperand(0) == N1.getOperand(0)) { 9367 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N0.getOperand(1), 9368 DAG.getConstantFP(2.0, DL, VT), Flags); 9369 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), NewCFP, Flags); 9370 } 9371 } 9372 9373 if (N1.getOpcode() == ISD::FMUL) { 9374 bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0)); 9375 bool CFP11 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(1)); 9376 9377 // (fadd x, (fmul x, c)) -> (fmul x, c+1) 9378 if (CFP11 && !CFP10 && N1.getOperand(0) == N0) { 9379 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1), 9380 DAG.getConstantFP(1.0, DL, VT), Flags); 9381 return DAG.getNode(ISD::FMUL, DL, VT, N0, NewCFP, Flags); 9382 } 9383 9384 // (fadd (fadd x, x), (fmul x, c)) -> (fmul x, c+2) 9385 if (CFP11 && !CFP10 && N0.getOpcode() == ISD::FADD && 9386 N0.getOperand(0) == N0.getOperand(1) && 9387 N1.getOperand(0) == N0.getOperand(0)) { 9388 SDValue NewCFP = DAG.getNode(ISD::FADD, DL, VT, N1.getOperand(1), 9389 DAG.getConstantFP(2.0, DL, VT), Flags); 9390 return DAG.getNode(ISD::FMUL, DL, VT, N1.getOperand(0), NewCFP, Flags); 9391 } 9392 } 9393 9394 if (N0.getOpcode() == ISD::FADD && AllowNewConst) { 9395 bool CFP00 = isConstantFPBuildVectorOrConstantFP(N0.getOperand(0)); 9396 // (fadd (fadd x, x), x) -> (fmul x, 3.0) 9397 if (!CFP00 && N0.getOperand(0) == N0.getOperand(1) && 9398 (N0.getOperand(0) == N1)) { 9399 return DAG.getNode(ISD::FMUL, DL, VT, 9400 N1, DAG.getConstantFP(3.0, DL, VT), Flags); 9401 } 9402 } 9403 9404 if (N1.getOpcode() == ISD::FADD && AllowNewConst) { 9405 bool CFP10 = isConstantFPBuildVectorOrConstantFP(N1.getOperand(0)); 9406 // (fadd x, (fadd x, x)) -> (fmul x, 3.0) 9407 if (!CFP10 && N1.getOperand(0) == N1.getOperand(1) && 9408 N1.getOperand(0) == N0) { 9409 return DAG.getNode(ISD::FMUL, DL, VT, 9410 N0, DAG.getConstantFP(3.0, DL, VT), Flags); 9411 } 9412 } 9413 9414 // (fadd (fadd x, x), (fadd x, x)) -> (fmul x, 4.0) 9415 if (AllowNewConst && 9416 N0.getOpcode() == ISD::FADD && N1.getOpcode() == ISD::FADD && 9417 N0.getOperand(0) == N0.getOperand(1) && 9418 N1.getOperand(0) == N1.getOperand(1) && 9419 N0.getOperand(0) == N1.getOperand(0)) { 9420 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), 9421 DAG.getConstantFP(4.0, DL, VT), Flags); 9422 } 9423 } 9424 } // enable-unsafe-fp-math 9425 9426 // FADD -> FMA combines: 9427 if (SDValue Fused = visitFADDForFMACombine(N)) { 9428 AddToWorklist(Fused.getNode()); 9429 return Fused; 9430 } 9431 return SDValue(); 9432 } 9433 9434 SDValue DAGCombiner::visitFSUB(SDNode *N) { 9435 SDValue N0 = N->getOperand(0); 9436 SDValue N1 = N->getOperand(1); 9437 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0); 9438 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1); 9439 EVT VT = N->getValueType(0); 9440 SDLoc DL(N); 9441 
const TargetOptions &Options = DAG.getTarget().Options; 9442 const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags; 9443 9444 // fold vector ops 9445 if (VT.isVector()) 9446 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 9447 return FoldedVOp; 9448 9449 // fold (fsub c1, c2) -> c1-c2 9450 if (N0CFP && N1CFP) 9451 return DAG.getNode(ISD::FSUB, DL, VT, N0, N1, Flags); 9452 9453 if (SDValue NewSel = foldBinOpIntoSelect(N)) 9454 return NewSel; 9455 9456 // fold (fsub A, (fneg B)) -> (fadd A, B) 9457 if (isNegatibleForFree(N1, LegalOperations, TLI, &Options)) 9458 return DAG.getNode(ISD::FADD, DL, VT, N0, 9459 GetNegatedExpression(N1, DAG, LegalOperations), Flags); 9460 9461 // FIXME: Auto-upgrade the target/function-level option. 9462 if (Options.NoSignedZerosFPMath || N->getFlags()->hasNoSignedZeros()) { 9463 // (fsub 0, B) -> -B 9464 if (N0CFP && N0CFP->isZero()) { 9465 if (isNegatibleForFree(N1, LegalOperations, TLI, &Options)) 9466 return GetNegatedExpression(N1, DAG, LegalOperations); 9467 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 9468 return DAG.getNode(ISD::FNEG, DL, VT, N1, Flags); 9469 } 9470 } 9471 9472 // If 'unsafe math' is enabled, fold lots of things. 9473 if (Options.UnsafeFPMath) { 9474 // (fsub A, 0) -> A 9475 if (N1CFP && N1CFP->isZero()) 9476 return N0; 9477 9478 // (fsub x, x) -> 0.0 9479 if (N0 == N1) 9480 return DAG.getConstantFP(0.0f, DL, VT); 9481 9482 // (fsub x, (fadd x, y)) -> (fneg y) 9483 // (fsub x, (fadd y, x)) -> (fneg y) 9484 if (N1.getOpcode() == ISD::FADD) { 9485 SDValue N10 = N1->getOperand(0); 9486 SDValue N11 = N1->getOperand(1); 9487 9488 if (N10 == N0 && isNegatibleForFree(N11, LegalOperations, TLI, &Options)) 9489 return GetNegatedExpression(N11, DAG, LegalOperations); 9490 9491 if (N11 == N0 && isNegatibleForFree(N10, LegalOperations, TLI, &Options)) 9492 return GetNegatedExpression(N10, DAG, LegalOperations); 9493 } 9494 } 9495 9496 // FSUB -> FMA combines: 9497 if (SDValue Fused = visitFSUBForFMACombine(N)) { 9498 AddToWorklist(Fused.getNode()); 9499 return Fused; 9500 } 9501 9502 return SDValue(); 9503 } 9504 9505 SDValue DAGCombiner::visitFMUL(SDNode *N) { 9506 SDValue N0 = N->getOperand(0); 9507 SDValue N1 = N->getOperand(1); 9508 ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0); 9509 ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1); 9510 EVT VT = N->getValueType(0); 9511 SDLoc DL(N); 9512 const TargetOptions &Options = DAG.getTarget().Options; 9513 const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags; 9514 9515 // fold vector ops 9516 if (VT.isVector()) { 9517 // This just handles C1 * C2 for vectors. Other vector folds are below. 9518 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 9519 return FoldedVOp; 9520 } 9521 9522 // fold (fmul c1, c2) -> c1*c2 9523 if (N0CFP && N1CFP) 9524 return DAG.getNode(ISD::FMUL, DL, VT, N0, N1, Flags); 9525 9526 // canonicalize constant to RHS 9527 if (isConstantFPBuildVectorOrConstantFP(N0) && 9528 !isConstantFPBuildVectorOrConstantFP(N1)) 9529 return DAG.getNode(ISD::FMUL, DL, VT, N1, N0, Flags); 9530 9531 // fold (fmul A, 1.0) -> A 9532 if (N1CFP && N1CFP->isExactlyValue(1.0)) 9533 return N0; 9534 9535 if (SDValue NewSel = foldBinOpIntoSelect(N)) 9536 return NewSel; 9537 9538 if (Options.UnsafeFPMath) { 9539 // fold (fmul A, 0) -> 0 9540 if (N1CFP && N1CFP->isZero()) 9541 return N1; 9542 9543 // fold (fmul (fmul x, c1), c2) -> (fmul x, (fmul c1, c2)) 9544 if (N0.getOpcode() == ISD::FMUL) { 9545 // Fold scalars or any vector constants (not just splats). 
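// For example, (fmul (fmul x, <2.0, 3.0>), <4.0, 5.0>) can become
// (fmul x, <8.0, 15.0>) even though neither constant operand is a splat.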
9546 // This fold is done in general by InstCombine, but extra fmul insts 9547 // may have been generated during lowering. 9548 SDValue N00 = N0.getOperand(0); 9549 SDValue N01 = N0.getOperand(1); 9550 auto *BV1 = dyn_cast<BuildVectorSDNode>(N1); 9551 auto *BV00 = dyn_cast<BuildVectorSDNode>(N00); 9552 auto *BV01 = dyn_cast<BuildVectorSDNode>(N01); 9553 9554 // Check 1: Make sure that the first operand of the inner multiply is NOT 9555 // a constant. Otherwise, we may induce infinite looping. 9556 if (!(isConstOrConstSplatFP(N00) || (BV00 && BV00->isConstant()))) { 9557 // Check 2: Make sure that the second operand of the inner multiply and 9558 // the second operand of the outer multiply are constants. 9559 if ((N1CFP && isConstOrConstSplatFP(N01)) || 9560 (BV1 && BV01 && BV1->isConstant() && BV01->isConstant())) { 9561 SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, N01, N1, Flags); 9562 return DAG.getNode(ISD::FMUL, DL, VT, N00, MulConsts, Flags); 9563 } 9564 } 9565 } 9566 9567 // fold (fmul (fadd x, x), c) -> (fmul x, (fmul 2.0, c)) 9568 // Undo the fmul 2.0, x -> fadd x, x transformation, since if it occurs 9569 // during an early run of DAGCombiner can prevent folding with fmuls 9570 // inserted during lowering. 9571 if (N0.getOpcode() == ISD::FADD && 9572 (N0.getOperand(0) == N0.getOperand(1)) && 9573 N0.hasOneUse()) { 9574 const SDValue Two = DAG.getConstantFP(2.0, DL, VT); 9575 SDValue MulConsts = DAG.getNode(ISD::FMUL, DL, VT, Two, N1, Flags); 9576 return DAG.getNode(ISD::FMUL, DL, VT, N0.getOperand(0), MulConsts, Flags); 9577 } 9578 } 9579 9580 // fold (fmul X, 2.0) -> (fadd X, X) 9581 if (N1CFP && N1CFP->isExactlyValue(+2.0)) 9582 return DAG.getNode(ISD::FADD, DL, VT, N0, N0, Flags); 9583 9584 // fold (fmul X, -1.0) -> (fneg X) 9585 if (N1CFP && N1CFP->isExactlyValue(-1.0)) 9586 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 9587 return DAG.getNode(ISD::FNEG, DL, VT, N0); 9588 9589 // fold (fmul (fneg X), (fneg Y)) -> (fmul X, Y) 9590 if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) { 9591 if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) { 9592 // Both can be negated for free, check to see if at least one is cheaper 9593 // negated. 9594 if (LHSNeg == 2 || RHSNeg == 2) 9595 return DAG.getNode(ISD::FMUL, DL, VT, 9596 GetNegatedExpression(N0, DAG, LegalOperations), 9597 GetNegatedExpression(N1, DAG, LegalOperations), 9598 Flags); 9599 } 9600 } 9601 9602 // FMUL -> FMA combines: 9603 if (SDValue Fused = visitFMULForFMADistributiveCombine(N)) { 9604 AddToWorklist(Fused.getNode()); 9605 return Fused; 9606 } 9607 9608 return SDValue(); 9609 } 9610 9611 SDValue DAGCombiner::visitFMA(SDNode *N) { 9612 SDValue N0 = N->getOperand(0); 9613 SDValue N1 = N->getOperand(1); 9614 SDValue N2 = N->getOperand(2); 9615 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9616 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 9617 EVT VT = N->getValueType(0); 9618 SDLoc DL(N); 9619 const TargetOptions &Options = DAG.getTarget().Options; 9620 9621 // Constant fold FMA. 9622 if (isa<ConstantFPSDNode>(N0) && 9623 isa<ConstantFPSDNode>(N1) && 9624 isa<ConstantFPSDNode>(N2)) { 9625 return DAG.getNode(ISD::FMA, DL, VT, N0, N1, N2); 9626 } 9627 9628 if (Options.UnsafeFPMath) { 9629 if (N0CFP && N0CFP->isZero()) 9630 return N2; 9631 if (N1CFP && N1CFP->isZero()) 9632 return N2; 9633 } 9634 // TODO: The FMA node should have flags that propagate to these nodes. 
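// fold (fma 1.0, x, y) -> (fadd x, y)
// fold (fma x, 1.0, y) -> (fadd x, y)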
9635 if (N0CFP && N0CFP->isExactlyValue(1.0)) 9636 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N1, N2); 9637 if (N1CFP && N1CFP->isExactlyValue(1.0)) 9638 return DAG.getNode(ISD::FADD, SDLoc(N), VT, N0, N2); 9639 9640 // Canonicalize (fma c, x, y) -> (fma x, c, y) 9641 if (isConstantFPBuildVectorOrConstantFP(N0) && 9642 !isConstantFPBuildVectorOrConstantFP(N1)) 9643 return DAG.getNode(ISD::FMA, SDLoc(N), VT, N1, N0, N2); 9644 9645 // TODO: FMA nodes should have flags that propagate to the created nodes. 9646 // For now, create a Flags object for use with all unsafe math transforms. 9647 SDNodeFlags Flags; 9648 Flags.setUnsafeAlgebra(true); 9649 9650 if (Options.UnsafeFPMath) { 9651 // (fma x, c1, (fmul x, c2)) -> (fmul x, c1+c2) 9652 if (N2.getOpcode() == ISD::FMUL && N0 == N2.getOperand(0) && 9653 isConstantFPBuildVectorOrConstantFP(N1) && 9654 isConstantFPBuildVectorOrConstantFP(N2.getOperand(1))) { 9655 return DAG.getNode(ISD::FMUL, DL, VT, N0, 9656 DAG.getNode(ISD::FADD, DL, VT, N1, N2.getOperand(1), 9657 &Flags), &Flags); 9658 } 9659 9660 // (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y) 9661 if (N0.getOpcode() == ISD::FMUL && 9662 isConstantFPBuildVectorOrConstantFP(N1) && 9663 isConstantFPBuildVectorOrConstantFP(N0.getOperand(1))) { 9664 return DAG.getNode(ISD::FMA, DL, VT, 9665 N0.getOperand(0), 9666 DAG.getNode(ISD::FMUL, DL, VT, N1, N0.getOperand(1), 9667 &Flags), 9668 N2); 9669 } 9670 } 9671 9672 // (fma x, 1, y) -> (fadd x, y) 9673 // (fma x, -1, y) -> (fadd (fneg x), y) 9674 if (N1CFP) { 9675 if (N1CFP->isExactlyValue(1.0)) 9676 // TODO: The FMA node should have flags that propagate to this node. 9677 return DAG.getNode(ISD::FADD, DL, VT, N0, N2); 9678 9679 if (N1CFP->isExactlyValue(-1.0) && 9680 (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT))) { 9681 SDValue RHSNeg = DAG.getNode(ISD::FNEG, DL, VT, N0); 9682 AddToWorklist(RHSNeg.getNode()); 9683 // TODO: The FMA node should have flags that propagate to this node. 9684 return DAG.getNode(ISD::FADD, DL, VT, N2, RHSNeg); 9685 } 9686 } 9687 9688 if (Options.UnsafeFPMath) { 9689 // (fma x, c, x) -> (fmul x, (c+1)) 9690 if (N1CFP && N0 == N2) { 9691 return DAG.getNode(ISD::FMUL, DL, VT, N0, 9692 DAG.getNode(ISD::FADD, DL, VT, N1, 9693 DAG.getConstantFP(1.0, DL, VT), &Flags), 9694 &Flags); 9695 } 9696 9697 // (fma x, c, (fneg x)) -> (fmul x, (c-1)) 9698 if (N1CFP && N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) { 9699 return DAG.getNode(ISD::FMUL, DL, VT, N0, 9700 DAG.getNode(ISD::FADD, DL, VT, N1, 9701 DAG.getConstantFP(-1.0, DL, VT), &Flags), 9702 &Flags); 9703 } 9704 } 9705 9706 return SDValue(); 9707 } 9708 9709 // Combine multiple FDIVs with the same divisor into multiple FMULs by the 9710 // reciprocal. 9711 // E.g., (a / D; b / D;) -> (recip = 1.0 / D; a * recip; b * recip) 9712 // Notice that this is not always beneficial. One reason is different targets 9713 // may have different costs for FDIV and FMUL, so sometimes the cost of two 9714 // FDIVs may be lower than the cost of one FDIV and two FMULs. Another reason 9715 // is the critical path is increased from "one FDIV" to "one FDIV + one FMUL". 9716 SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) { 9717 bool UnsafeMath = DAG.getTarget().Options.UnsafeFPMath; 9718 const SDNodeFlags *Flags = N->getFlags(); 9719 if (!UnsafeMath && !Flags->hasAllowReciprocal()) 9720 return SDValue(); 9721 9722 // Skip if current node is a reciprocal. 
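// That is, if this node already computes 1.0 / D, the reciprocal we would build
// below is the very node we started from, so there is nothing to gain.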
9723 SDValue N0 = N->getOperand(0); 9724 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9725 if (N0CFP && N0CFP->isExactlyValue(1.0)) 9726 return SDValue(); 9727 9728 // Exit early if the target does not want this transform or if there can't 9729 // possibly be enough uses of the divisor to make the transform worthwhile. 9730 SDValue N1 = N->getOperand(1); 9731 unsigned MinUses = TLI.combineRepeatedFPDivisors(); 9732 if (!MinUses || N1->use_size() < MinUses) 9733 return SDValue(); 9734 9735 // Find all FDIV users of the same divisor. 9736 // Use a set because duplicates may be present in the user list. 9737 SetVector<SDNode *> Users; 9738 for (auto *U : N1->uses()) { 9739 if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1) { 9740 // This division is eligible for optimization only if global unsafe math 9741 // is enabled or if this division allows reciprocal formation. 9742 if (UnsafeMath || U->getFlags()->hasAllowReciprocal()) 9743 Users.insert(U); 9744 } 9745 } 9746 9747 // Now that we have the actual number of divisor uses, make sure it meets 9748 // the minimum threshold specified by the target. 9749 if (Users.size() < MinUses) 9750 return SDValue(); 9751 9752 EVT VT = N->getValueType(0); 9753 SDLoc DL(N); 9754 SDValue FPOne = DAG.getConstantFP(1.0, DL, VT); 9755 SDValue Reciprocal = DAG.getNode(ISD::FDIV, DL, VT, FPOne, N1, Flags); 9756 9757 // Dividend / Divisor -> Dividend * Reciprocal 9758 for (auto *U : Users) { 9759 SDValue Dividend = U->getOperand(0); 9760 if (Dividend != FPOne) { 9761 SDValue NewNode = DAG.getNode(ISD::FMUL, SDLoc(U), VT, Dividend, 9762 Reciprocal, Flags); 9763 CombineTo(U, NewNode); 9764 } else if (U != Reciprocal.getNode()) { 9765 // In the absence of fast-math-flags, this user node is always the 9766 // same node as Reciprocal, but with FMF they may be different nodes. 9767 CombineTo(U, Reciprocal); 9768 } 9769 } 9770 return SDValue(N, 0); // N was replaced. 9771 } 9772 9773 SDValue DAGCombiner::visitFDIV(SDNode *N) { 9774 SDValue N0 = N->getOperand(0); 9775 SDValue N1 = N->getOperand(1); 9776 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9777 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 9778 EVT VT = N->getValueType(0); 9779 SDLoc DL(N); 9780 const TargetOptions &Options = DAG.getTarget().Options; 9781 SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags; 9782 9783 // fold vector ops 9784 if (VT.isVector()) 9785 if (SDValue FoldedVOp = SimplifyVBinOp(N)) 9786 return FoldedVOp; 9787 9788 // fold (fdiv c1, c2) -> c1/c2 9789 if (N0CFP && N1CFP) 9790 return DAG.getNode(ISD::FDIV, SDLoc(N), VT, N0, N1, Flags); 9791 9792 if (SDValue NewSel = foldBinOpIntoSelect(N)) 9793 return NewSel; 9794 9795 if (Options.UnsafeFPMath) { 9796 // fold (fdiv X, c2) -> fmul X, 1/c2 if losing precision is acceptable. 9797 if (N1CFP) { 9798 // Compute the reciprocal 1.0 / c2. 9799 const APFloat &N1APF = N1CFP->getValueAPF(); 9800 APFloat Recip(N1APF.getSemantics(), 1); // 1.0 9801 APFloat::opStatus st = Recip.divide(N1APF, APFloat::rmNearestTiesToEven); 9802 // Only do the transform if the reciprocal is a legal fp immediate that 9803 // isn't too nasty (eg NaN, denormal, ...). 9804 if ((st == APFloat::opOK || st == APFloat::opInexact) && // Not too nasty 9805 (!LegalOperations || 9806 // FIXME: custom lowering of ConstantFP might fail (see e.g. ARM 9807 // backend)... we should handle this gracefully after Legalize. 
9808 // TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT) || 9809 TLI.isOperationLegal(llvm::ISD::ConstantFP, VT) || 9810 TLI.isFPImmLegal(Recip, VT))) 9811 return DAG.getNode(ISD::FMUL, DL, VT, N0, 9812 DAG.getConstantFP(Recip, DL, VT), Flags); 9813 } 9814 9815 // If this FDIV is part of a reciprocal square root, it may be folded 9816 // into a target-specific square root estimate instruction. 9817 if (N1.getOpcode() == ISD::FSQRT) { 9818 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0), Flags)) { 9819 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9820 } 9821 } else if (N1.getOpcode() == ISD::FP_EXTEND && 9822 N1.getOperand(0).getOpcode() == ISD::FSQRT) { 9823 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0), 9824 Flags)) { 9825 RV = DAG.getNode(ISD::FP_EXTEND, SDLoc(N1), VT, RV); 9826 AddToWorklist(RV.getNode()); 9827 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9828 } 9829 } else if (N1.getOpcode() == ISD::FP_ROUND && 9830 N1.getOperand(0).getOpcode() == ISD::FSQRT) { 9831 if (SDValue RV = buildRsqrtEstimate(N1.getOperand(0).getOperand(0), 9832 Flags)) { 9833 RV = DAG.getNode(ISD::FP_ROUND, SDLoc(N1), VT, RV, N1.getOperand(1)); 9834 AddToWorklist(RV.getNode()); 9835 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9836 } 9837 } else if (N1.getOpcode() == ISD::FMUL) { 9838 // Look through an FMUL. Even though this won't remove the FDIV directly, 9839 // it's still worthwhile to get rid of the FSQRT if possible. 9840 SDValue SqrtOp; 9841 SDValue OtherOp; 9842 if (N1.getOperand(0).getOpcode() == ISD::FSQRT) { 9843 SqrtOp = N1.getOperand(0); 9844 OtherOp = N1.getOperand(1); 9845 } else if (N1.getOperand(1).getOpcode() == ISD::FSQRT) { 9846 SqrtOp = N1.getOperand(1); 9847 OtherOp = N1.getOperand(0); 9848 } 9849 if (SqrtOp.getNode()) { 9850 // We found a FSQRT, so try to make this fold: 9851 // x / (y * sqrt(z)) -> x * (rsqrt(z) / y) 9852 if (SDValue RV = buildRsqrtEstimate(SqrtOp.getOperand(0), Flags)) { 9853 RV = DAG.getNode(ISD::FDIV, SDLoc(N1), VT, RV, OtherOp, Flags); 9854 AddToWorklist(RV.getNode()); 9855 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9856 } 9857 } 9858 } 9859 9860 // Fold into a reciprocal estimate and multiply instead of a real divide. 9861 if (SDValue RV = BuildReciprocalEstimate(N1, Flags)) { 9862 AddToWorklist(RV.getNode()); 9863 return DAG.getNode(ISD::FMUL, DL, VT, N0, RV, Flags); 9864 } 9865 } 9866 9867 // (fdiv (fneg X), (fneg Y)) -> (fdiv X, Y) 9868 if (char LHSNeg = isNegatibleForFree(N0, LegalOperations, TLI, &Options)) { 9869 if (char RHSNeg = isNegatibleForFree(N1, LegalOperations, TLI, &Options)) { 9870 // Both can be negated for free, check to see if at least one is cheaper 9871 // negated. 
9872 if (LHSNeg == 2 || RHSNeg == 2) 9873 return DAG.getNode(ISD::FDIV, SDLoc(N), VT, 9874 GetNegatedExpression(N0, DAG, LegalOperations), 9875 GetNegatedExpression(N1, DAG, LegalOperations), 9876 Flags); 9877 } 9878 } 9879 9880 if (SDValue CombineRepeatedDivisors = combineRepeatedFPDivisors(N)) 9881 return CombineRepeatedDivisors; 9882 9883 return SDValue(); 9884 } 9885 9886 SDValue DAGCombiner::visitFREM(SDNode *N) { 9887 SDValue N0 = N->getOperand(0); 9888 SDValue N1 = N->getOperand(1); 9889 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9890 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 9891 EVT VT = N->getValueType(0); 9892 9893 // fold (frem c1, c2) -> fmod(c1,c2) 9894 if (N0CFP && N1CFP) 9895 return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1, 9896 &cast<BinaryWithFlagsSDNode>(N)->Flags); 9897 9898 if (SDValue NewSel = foldBinOpIntoSelect(N)) 9899 return NewSel; 9900 9901 return SDValue(); 9902 } 9903 9904 SDValue DAGCombiner::visitFSQRT(SDNode *N) { 9905 if (!DAG.getTarget().Options.UnsafeFPMath) 9906 return SDValue(); 9907 9908 SDValue N0 = N->getOperand(0); 9909 if (TLI.isFsqrtCheap(N0, DAG)) 9910 return SDValue(); 9911 9912 // TODO: FSQRT nodes should have flags that propagate to the created nodes. 9913 // For now, create a Flags object for use with all unsafe math transforms. 9914 SDNodeFlags Flags; 9915 Flags.setUnsafeAlgebra(true); 9916 return buildSqrtEstimate(N0, &Flags); 9917 } 9918 9919 /// copysign(x, fp_extend(y)) -> copysign(x, y) 9920 /// copysign(x, fp_round(y)) -> copysign(x, y) 9921 static inline bool CanCombineFCOPYSIGN_EXTEND_ROUND(SDNode *N) { 9922 SDValue N1 = N->getOperand(1); 9923 if ((N1.getOpcode() == ISD::FP_EXTEND || 9924 N1.getOpcode() == ISD::FP_ROUND)) { 9925 // Do not optimize out type conversion of f128 type yet. 9926 // For some targets like x86_64, configuration is changed to keep one f128 9927 // value in one SSE register, but instruction selection cannot handle 9928 // FCOPYSIGN on SSE registers yet. 
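// So the combine below is allowed only when the conversion does not change the
// type, or when the value being converted is not an f128.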
9929 EVT N1VT = N1->getValueType(0); 9930 EVT N1Op0VT = N1->getOperand(0)->getValueType(0); 9931 return (N1VT == N1Op0VT || N1Op0VT != MVT::f128); 9932 } 9933 return false; 9934 } 9935 9936 SDValue DAGCombiner::visitFCOPYSIGN(SDNode *N) { 9937 SDValue N0 = N->getOperand(0); 9938 SDValue N1 = N->getOperand(1); 9939 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 9940 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1); 9941 EVT VT = N->getValueType(0); 9942 9943 if (N0CFP && N1CFP) // Constant fold 9944 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1); 9945 9946 if (N1CFP) { 9947 const APFloat &V = N1CFP->getValueAPF(); 9948 // copysign(x, c1) -> fabs(x) iff ispos(c1) 9949 // copysign(x, c1) -> fneg(fabs(x)) iff isneg(c1) 9950 if (!V.isNegative()) { 9951 if (!LegalOperations || TLI.isOperationLegal(ISD::FABS, VT)) 9952 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0); 9953 } else { 9954 if (!LegalOperations || TLI.isOperationLegal(ISD::FNEG, VT)) 9955 return DAG.getNode(ISD::FNEG, SDLoc(N), VT, 9956 DAG.getNode(ISD::FABS, SDLoc(N0), VT, N0)); 9957 } 9958 } 9959 9960 // copysign(fabs(x), y) -> copysign(x, y) 9961 // copysign(fneg(x), y) -> copysign(x, y) 9962 // copysign(copysign(x,z), y) -> copysign(x, y) 9963 if (N0.getOpcode() == ISD::FABS || N0.getOpcode() == ISD::FNEG || 9964 N0.getOpcode() == ISD::FCOPYSIGN) 9965 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0.getOperand(0), N1); 9966 9967 // copysign(x, abs(y)) -> abs(x) 9968 if (N1.getOpcode() == ISD::FABS) 9969 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0); 9970 9971 // copysign(x, copysign(y,z)) -> copysign(x, z) 9972 if (N1.getOpcode() == ISD::FCOPYSIGN) 9973 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(1)); 9974 9975 // copysign(x, fp_extend(y)) -> copysign(x, y) 9976 // copysign(x, fp_round(y)) -> copysign(x, y) 9977 if (CanCombineFCOPYSIGN_EXTEND_ROUND(N)) 9978 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, N0, N1.getOperand(0)); 9979 9980 return SDValue(); 9981 } 9982 9983 SDValue DAGCombiner::visitSINT_TO_FP(SDNode *N) { 9984 SDValue N0 = N->getOperand(0); 9985 EVT VT = N->getValueType(0); 9986 EVT OpVT = N0.getValueType(); 9987 9988 // fold (sint_to_fp c1) -> c1fp 9989 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 9990 // ...but only if the target supports immediate floating-point values 9991 (!LegalOperations || 9992 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) 9993 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0); 9994 9995 // If the input is a legal type, and SINT_TO_FP is not legal on this target, 9996 // but UINT_TO_FP is legal on this target, try to convert. 9997 if (!TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT) && 9998 TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT)) { 9999 // If the sign bit is known to be zero, we can change this to UINT_TO_FP. 10000 if (DAG.SignBitIsZero(N0)) 10001 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0); 10002 } 10003 10004 // The next optimizations are desirable only if SELECT_CC can be lowered. 
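// An i1 setcc result is 0 or 1; sign-extending it for sint_to_fp gives 0 or -1,
// so the conversion is equivalent to selecting between -1.0 and 0.0 on the
// comparison.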
10005 if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) { 10006 // fold (sint_to_fp (setcc x, y, cc)) -> (select_cc x, y, -1.0, 0.0,, cc) 10007 if (N0.getOpcode() == ISD::SETCC && N0.getValueType() == MVT::i1 && 10008 !VT.isVector() && 10009 (!LegalOperations || 10010 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) { 10011 SDLoc DL(N); 10012 SDValue Ops[] = 10013 { N0.getOperand(0), N0.getOperand(1), 10014 DAG.getConstantFP(-1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT), 10015 N0.getOperand(2) }; 10016 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops); 10017 } 10018 10019 // fold (sint_to_fp (zext (setcc x, y, cc))) -> 10020 // (select_cc x, y, 1.0, 0.0,, cc) 10021 if (N0.getOpcode() == ISD::ZERO_EXTEND && 10022 N0.getOperand(0).getOpcode() == ISD::SETCC &&!VT.isVector() && 10023 (!LegalOperations || 10024 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) { 10025 SDLoc DL(N); 10026 SDValue Ops[] = 10027 { N0.getOperand(0).getOperand(0), N0.getOperand(0).getOperand(1), 10028 DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT), 10029 N0.getOperand(0).getOperand(2) }; 10030 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops); 10031 } 10032 } 10033 10034 return SDValue(); 10035 } 10036 10037 SDValue DAGCombiner::visitUINT_TO_FP(SDNode *N) { 10038 SDValue N0 = N->getOperand(0); 10039 EVT VT = N->getValueType(0); 10040 EVT OpVT = N0.getValueType(); 10041 10042 // fold (uint_to_fp c1) -> c1fp 10043 if (DAG.isConstantIntBuildVectorOrConstantInt(N0) && 10044 // ...but only if the target supports immediate floating-point values 10045 (!LegalOperations || 10046 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) 10047 return DAG.getNode(ISD::UINT_TO_FP, SDLoc(N), VT, N0); 10048 10049 // If the input is a legal type, and UINT_TO_FP is not legal on this target, 10050 // but SINT_TO_FP is legal on this target, try to convert. 10051 if (!TLI.isOperationLegalOrCustom(ISD::UINT_TO_FP, OpVT) && 10052 TLI.isOperationLegalOrCustom(ISD::SINT_TO_FP, OpVT)) { 10053 // If the sign bit is known to be zero, we can change this to SINT_TO_FP. 10054 if (DAG.SignBitIsZero(N0)) 10055 return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, N0); 10056 } 10057 10058 // The next optimizations are desirable only if SELECT_CC can be lowered. 
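// Same idea as the signed case above, but the boolean is treated as unsigned,
// so the select produces +1.0 rather than -1.0 for a true comparison.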
10059 if (TLI.isOperationLegalOrCustom(ISD::SELECT_CC, VT) || !LegalOperations) {
10060 // fold (uint_to_fp (setcc x, y, cc)) -> (select_cc x, y, 1.0, 0.0, cc)
10061
10062 if (N0.getOpcode() == ISD::SETCC && !VT.isVector() &&
10063 (!LegalOperations ||
10064 TLI.isOperationLegalOrCustom(llvm::ISD::ConstantFP, VT))) {
10065 SDLoc DL(N);
10066 SDValue Ops[] =
10067 { N0.getOperand(0), N0.getOperand(1),
10068 DAG.getConstantFP(1.0, DL, VT), DAG.getConstantFP(0.0, DL, VT),
10069 N0.getOperand(2) };
10070 return DAG.getNode(ISD::SELECT_CC, DL, VT, Ops);
10071 }
10072 }
10073
10074 return SDValue();
10075 }
10076
10077 // Fold (fp_to_{s/u}int ({s/u}int_to_fp x)) -> zext x, sext x, trunc x, or x
10078 static SDValue FoldIntToFPToInt(SDNode *N, SelectionDAG &DAG) {
10079 SDValue N0 = N->getOperand(0);
10080 EVT VT = N->getValueType(0);
10081
10082 if (N0.getOpcode() != ISD::UINT_TO_FP && N0.getOpcode() != ISD::SINT_TO_FP)
10083 return SDValue();
10084
10085 SDValue Src = N0.getOperand(0);
10086 EVT SrcVT = Src.getValueType();
10087 bool IsInputSigned = N0.getOpcode() == ISD::SINT_TO_FP;
10088 bool IsOutputSigned = N->getOpcode() == ISD::FP_TO_SINT;
10089
10090 // We can safely assume the conversion won't overflow the output range,
10091 // because (for example) (uint8_t)18293.f is undefined behavior.
10092
10093 // Since we can assume the conversion won't overflow, our decision as to
10094 // whether the input will fit in the float should depend on the minimum
10095 // of the input range and output range.
10096
10097 // This means this is also safe for a signed input and unsigned output, since
10098 // a negative input would lead to undefined behavior.
10099 unsigned InputSize = (int)SrcVT.getScalarSizeInBits() - IsInputSigned;
10100 unsigned OutputSize = (int)VT.getScalarSizeInBits() - IsOutputSigned;
10101 unsigned ActualSize = std::min(InputSize, OutputSize);
10102 const fltSemantics &sem = DAG.EVTToAPFloatSemantics(N0.getValueType());
10103
10104 // We can only fold away the float conversion if the input range can be
10105 // represented exactly in the float range.
10106 if (APFloat::semanticsPrecision(sem) >= ActualSize) {
10107 if (VT.getScalarSizeInBits() > SrcVT.getScalarSizeInBits()) {
10108 unsigned ExtOp = IsInputSigned && IsOutputSigned ?
ISD::SIGN_EXTEND 10109 : ISD::ZERO_EXTEND; 10110 return DAG.getNode(ExtOp, SDLoc(N), VT, Src); 10111 } 10112 if (VT.getScalarSizeInBits() < SrcVT.getScalarSizeInBits()) 10113 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Src); 10114 return DAG.getBitcast(VT, Src); 10115 } 10116 return SDValue(); 10117 } 10118 10119 SDValue DAGCombiner::visitFP_TO_SINT(SDNode *N) { 10120 SDValue N0 = N->getOperand(0); 10121 EVT VT = N->getValueType(0); 10122 10123 // fold (fp_to_sint c1fp) -> c1 10124 if (isConstantFPBuildVectorOrConstantFP(N0)) 10125 return DAG.getNode(ISD::FP_TO_SINT, SDLoc(N), VT, N0); 10126 10127 return FoldIntToFPToInt(N, DAG); 10128 } 10129 10130 SDValue DAGCombiner::visitFP_TO_UINT(SDNode *N) { 10131 SDValue N0 = N->getOperand(0); 10132 EVT VT = N->getValueType(0); 10133 10134 // fold (fp_to_uint c1fp) -> c1 10135 if (isConstantFPBuildVectorOrConstantFP(N0)) 10136 return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), VT, N0); 10137 10138 return FoldIntToFPToInt(N, DAG); 10139 } 10140 10141 SDValue DAGCombiner::visitFP_ROUND(SDNode *N) { 10142 SDValue N0 = N->getOperand(0); 10143 SDValue N1 = N->getOperand(1); 10144 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 10145 EVT VT = N->getValueType(0); 10146 10147 // fold (fp_round c1fp) -> c1fp 10148 if (N0CFP) 10149 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, N0, N1); 10150 10151 // fold (fp_round (fp_extend x)) -> x 10152 if (N0.getOpcode() == ISD::FP_EXTEND && VT == N0.getOperand(0).getValueType()) 10153 return N0.getOperand(0); 10154 10155 // fold (fp_round (fp_round x)) -> (fp_round x) 10156 if (N0.getOpcode() == ISD::FP_ROUND) { 10157 const bool NIsTrunc = N->getConstantOperandVal(1) == 1; 10158 const bool N0IsTrunc = N0.getConstantOperandVal(1) == 1; 10159 10160 // Skip this folding if it results in an fp_round from f80 to f16. 10161 // 10162 // f80 to f16 always generates an expensive (and as yet, unimplemented) 10163 // libcall to __truncxfhf2 instead of selecting native f16 conversion 10164 // instructions from f32 or f64. Moreover, the first (value-preserving) 10165 // fp_round from f80 to either f32 or f64 may become a NOP in platforms like 10166 // x86. 10167 if (N0.getOperand(0).getValueType() == MVT::f80 && VT == MVT::f16) 10168 return SDValue(); 10169 10170 // If the first fp_round isn't a value preserving truncation, it might 10171 // introduce a tie in the second fp_round, that wouldn't occur in the 10172 // single-step fp_round we want to fold to. 10173 // In other words, double rounding isn't the same as rounding. 10174 // Also, this is a value preserving truncation iff both fp_round's are. 
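// For example, rounding f80->f64->f32 can give a different result than rounding
// f80->f32 directly if the first rounding lands exactly halfway between two f32
// values.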
10175 if (DAG.getTarget().Options.UnsafeFPMath || N0IsTrunc) { 10176 SDLoc DL(N); 10177 return DAG.getNode(ISD::FP_ROUND, DL, VT, N0.getOperand(0), 10178 DAG.getIntPtrConstant(NIsTrunc && N0IsTrunc, DL)); 10179 } 10180 } 10181 10182 // fold (fp_round (copysign X, Y)) -> (copysign (fp_round X), Y) 10183 if (N0.getOpcode() == ISD::FCOPYSIGN && N0.getNode()->hasOneUse()) { 10184 SDValue Tmp = DAG.getNode(ISD::FP_ROUND, SDLoc(N0), VT, 10185 N0.getOperand(0), N1); 10186 AddToWorklist(Tmp.getNode()); 10187 return DAG.getNode(ISD::FCOPYSIGN, SDLoc(N), VT, 10188 Tmp, N0.getOperand(1)); 10189 } 10190 10191 return SDValue(); 10192 } 10193 10194 SDValue DAGCombiner::visitFP_ROUND_INREG(SDNode *N) { 10195 SDValue N0 = N->getOperand(0); 10196 EVT VT = N->getValueType(0); 10197 EVT EVT = cast<VTSDNode>(N->getOperand(1))->getVT(); 10198 ConstantFPSDNode *N0CFP = dyn_cast<ConstantFPSDNode>(N0); 10199 10200 // fold (fp_round_inreg c1fp) -> c1fp 10201 if (N0CFP && isTypeLegal(EVT)) { 10202 SDLoc DL(N); 10203 SDValue Round = DAG.getConstantFP(*N0CFP->getConstantFPValue(), DL, EVT); 10204 return DAG.getNode(ISD::FP_EXTEND, DL, VT, Round); 10205 } 10206 10207 return SDValue(); 10208 } 10209 10210 SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) { 10211 SDValue N0 = N->getOperand(0); 10212 EVT VT = N->getValueType(0); 10213 10214 // If this is fp_round(fpextend), don't fold it, allow ourselves to be folded. 10215 if (N->hasOneUse() && 10216 N->use_begin()->getOpcode() == ISD::FP_ROUND) 10217 return SDValue(); 10218 10219 // fold (fp_extend c1fp) -> c1fp 10220 if (isConstantFPBuildVectorOrConstantFP(N0)) 10221 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, N0); 10222 10223 // fold (fp_extend (fp16_to_fp op)) -> (fp16_to_fp op) 10224 if (N0.getOpcode() == ISD::FP16_TO_FP && 10225 TLI.getOperationAction(ISD::FP16_TO_FP, VT) == TargetLowering::Legal) 10226 return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), VT, N0.getOperand(0)); 10227 10228 // Turn fp_extend(fp_round(X, 1)) -> x since the fp_round doesn't affect the 10229 // value of X. 10230 if (N0.getOpcode() == ISD::FP_ROUND 10231 && N0.getConstantOperandVal(1) == 1) { 10232 SDValue In = N0.getOperand(0); 10233 if (In.getValueType() == VT) return In; 10234 if (VT.bitsLT(In.getValueType())) 10235 return DAG.getNode(ISD::FP_ROUND, SDLoc(N), VT, 10236 In, N0.getOperand(1)); 10237 return DAG.getNode(ISD::FP_EXTEND, SDLoc(N), VT, In); 10238 } 10239 10240 // fold (fpext (load x)) -> (fpext (fptrunc (extload x))) 10241 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() && 10242 TLI.isLoadExtLegal(ISD::EXTLOAD, VT, N0.getValueType())) { 10243 LoadSDNode *LN0 = cast<LoadSDNode>(N0); 10244 SDValue ExtLoad = DAG.getExtLoad(ISD::EXTLOAD, SDLoc(N), VT, 10245 LN0->getChain(), 10246 LN0->getBasePtr(), N0.getValueType(), 10247 LN0->getMemOperand()); 10248 CombineTo(N, ExtLoad); 10249 CombineTo(N0.getNode(), 10250 DAG.getNode(ISD::FP_ROUND, SDLoc(N0), 10251 N0.getValueType(), ExtLoad, 10252 DAG.getIntPtrConstant(1, SDLoc(N0))), 10253 ExtLoad.getValue(1)); 10254 return SDValue(N, 0); // Return N so it doesn't get rechecked! 
10255 } 10256 10257 return SDValue(); 10258 } 10259 10260 SDValue DAGCombiner::visitFCEIL(SDNode *N) { 10261 SDValue N0 = N->getOperand(0); 10262 EVT VT = N->getValueType(0); 10263 10264 // fold (fceil c1) -> fceil(c1) 10265 if (isConstantFPBuildVectorOrConstantFP(N0)) 10266 return DAG.getNode(ISD::FCEIL, SDLoc(N), VT, N0); 10267 10268 return SDValue(); 10269 } 10270 10271 SDValue DAGCombiner::visitFTRUNC(SDNode *N) { 10272 SDValue N0 = N->getOperand(0); 10273 EVT VT = N->getValueType(0); 10274 10275 // fold (ftrunc c1) -> ftrunc(c1) 10276 if (isConstantFPBuildVectorOrConstantFP(N0)) 10277 return DAG.getNode(ISD::FTRUNC, SDLoc(N), VT, N0); 10278 10279 return SDValue(); 10280 } 10281 10282 SDValue DAGCombiner::visitFFLOOR(SDNode *N) { 10283 SDValue N0 = N->getOperand(0); 10284 EVT VT = N->getValueType(0); 10285 10286 // fold (ffloor c1) -> ffloor(c1) 10287 if (isConstantFPBuildVectorOrConstantFP(N0)) 10288 return DAG.getNode(ISD::FFLOOR, SDLoc(N), VT, N0); 10289 10290 return SDValue(); 10291 } 10292 10293 // FIXME: FNEG and FABS have a lot in common; refactor. 10294 SDValue DAGCombiner::visitFNEG(SDNode *N) { 10295 SDValue N0 = N->getOperand(0); 10296 EVT VT = N->getValueType(0); 10297 10298 // Constant fold FNEG. 10299 if (isConstantFPBuildVectorOrConstantFP(N0)) 10300 return DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0); 10301 10302 if (isNegatibleForFree(N0, LegalOperations, DAG.getTargetLoweringInfo(), 10303 &DAG.getTarget().Options)) 10304 return GetNegatedExpression(N0, DAG, LegalOperations); 10305 10306 // Transform fneg(bitconvert(x)) -> bitconvert(x ^ sign) to avoid loading 10307 // constant pool values. 10308 if (!TLI.isFNegFree(VT) && 10309 N0.getOpcode() == ISD::BITCAST && 10310 N0.getNode()->hasOneUse()) { 10311 SDValue Int = N0.getOperand(0); 10312 EVT IntVT = Int.getValueType(); 10313 if (IntVT.isInteger() && !IntVT.isVector()) { 10314 APInt SignMask; 10315 if (N0.getValueType().isVector()) { 10316 // For a vector, get a mask such as 0x80... per scalar element 10317 // and splat it. 10318 SignMask = APInt::getSignBit(N0.getScalarValueSizeInBits()); 10319 SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask); 10320 } else { 10321 // For a scalar, just generate 0x80... 
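// e.g. 0x80000000 for a 32-bit integer type, 0x8000000000000000 for 64 bits.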
10322 SignMask = APInt::getSignBit(IntVT.getSizeInBits()); 10323 } 10324 SDLoc DL0(N0); 10325 Int = DAG.getNode(ISD::XOR, DL0, IntVT, Int, 10326 DAG.getConstant(SignMask, DL0, IntVT)); 10327 AddToWorklist(Int.getNode()); 10328 return DAG.getBitcast(VT, Int); 10329 } 10330 } 10331 10332 // (fneg (fmul c, x)) -> (fmul -c, x) 10333 if (N0.getOpcode() == ISD::FMUL && 10334 (N0.getNode()->hasOneUse() || !TLI.isFNegFree(VT))) { 10335 ConstantFPSDNode *CFP1 = dyn_cast<ConstantFPSDNode>(N0.getOperand(1)); 10336 if (CFP1) { 10337 APFloat CVal = CFP1->getValueAPF(); 10338 CVal.changeSign(); 10339 if (Level >= AfterLegalizeDAG && 10340 (TLI.isFPImmLegal(CVal, VT) || 10341 TLI.isOperationLegal(ISD::ConstantFP, VT))) 10342 return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0.getOperand(0), 10343 DAG.getNode(ISD::FNEG, SDLoc(N), VT, 10344 N0.getOperand(1)), 10345 &cast<BinaryWithFlagsSDNode>(N0)->Flags); 10346 } 10347 } 10348 10349 return SDValue(); 10350 } 10351 10352 SDValue DAGCombiner::visitFMINNUM(SDNode *N) { 10353 SDValue N0 = N->getOperand(0); 10354 SDValue N1 = N->getOperand(1); 10355 EVT VT = N->getValueType(0); 10356 const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0); 10357 const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1); 10358 10359 if (N0CFP && N1CFP) { 10360 const APFloat &C0 = N0CFP->getValueAPF(); 10361 const APFloat &C1 = N1CFP->getValueAPF(); 10362 return DAG.getConstantFP(minnum(C0, C1), SDLoc(N), VT); 10363 } 10364 10365 // Canonicalize to constant on RHS. 10366 if (isConstantFPBuildVectorOrConstantFP(N0) && 10367 !isConstantFPBuildVectorOrConstantFP(N1)) 10368 return DAG.getNode(ISD::FMINNUM, SDLoc(N), VT, N1, N0); 10369 10370 return SDValue(); 10371 } 10372 10373 SDValue DAGCombiner::visitFMAXNUM(SDNode *N) { 10374 SDValue N0 = N->getOperand(0); 10375 SDValue N1 = N->getOperand(1); 10376 EVT VT = N->getValueType(0); 10377 const ConstantFPSDNode *N0CFP = isConstOrConstSplatFP(N0); 10378 const ConstantFPSDNode *N1CFP = isConstOrConstSplatFP(N1); 10379 10380 if (N0CFP && N1CFP) { 10381 const APFloat &C0 = N0CFP->getValueAPF(); 10382 const APFloat &C1 = N1CFP->getValueAPF(); 10383 return DAG.getConstantFP(maxnum(C0, C1), SDLoc(N), VT); 10384 } 10385 10386 // Canonicalize to constant on RHS. 10387 if (isConstantFPBuildVectorOrConstantFP(N0) && 10388 !isConstantFPBuildVectorOrConstantFP(N1)) 10389 return DAG.getNode(ISD::FMAXNUM, SDLoc(N), VT, N1, N0); 10390 10391 return SDValue(); 10392 } 10393 10394 SDValue DAGCombiner::visitFABS(SDNode *N) { 10395 SDValue N0 = N->getOperand(0); 10396 EVT VT = N->getValueType(0); 10397 10398 // fold (fabs c1) -> fabs(c1) 10399 if (isConstantFPBuildVectorOrConstantFP(N0)) 10400 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0); 10401 10402 // fold (fabs (fabs x)) -> (fabs x) 10403 if (N0.getOpcode() == ISD::FABS) 10404 return N->getOperand(0); 10405 10406 // fold (fabs (fneg x)) -> (fabs x) 10407 // fold (fabs (fcopysign x, y)) -> (fabs x) 10408 if (N0.getOpcode() == ISD::FNEG || N0.getOpcode() == ISD::FCOPYSIGN) 10409 return DAG.getNode(ISD::FABS, SDLoc(N), VT, N0.getOperand(0)); 10410 10411 // Transform fabs(bitconvert(x)) -> bitconvert(x & ~sign) to avoid loading 10412 // constant pool values. 10413 if (!TLI.isFAbsFree(VT) && 10414 N0.getOpcode() == ISD::BITCAST && 10415 N0.getNode()->hasOneUse()) { 10416 SDValue Int = N0.getOperand(0); 10417 EVT IntVT = Int.getValueType(); 10418 if (IntVT.isInteger() && !IntVT.isVector()) { 10419 APInt SignMask; 10420 if (N0.getValueType().isVector()) { 10421 // For a vector, get a mask such as 0x7f... 
per scalar element 10422 // and splat it. 10423 SignMask = ~APInt::getSignBit(N0.getScalarValueSizeInBits()); 10424 SignMask = APInt::getSplat(IntVT.getSizeInBits(), SignMask); 10425 } else { 10426 // For a scalar, just generate 0x7f... 10427 SignMask = ~APInt::getSignBit(IntVT.getSizeInBits()); 10428 } 10429 SDLoc DL(N0); 10430 Int = DAG.getNode(ISD::AND, DL, IntVT, Int, 10431 DAG.getConstant(SignMask, DL, IntVT)); 10432 AddToWorklist(Int.getNode()); 10433 return DAG.getBitcast(N->getValueType(0), Int); 10434 } 10435 } 10436 10437 return SDValue(); 10438 } 10439 10440 SDValue DAGCombiner::visitBRCOND(SDNode *N) { 10441 SDValue Chain = N->getOperand(0); 10442 SDValue N1 = N->getOperand(1); 10443 SDValue N2 = N->getOperand(2); 10444 10445 // If N is a constant we could fold this into a fallthrough or unconditional 10446 // branch. However that doesn't happen very often in normal code, because 10447 // Instcombine/SimplifyCFG should have handled the available opportunities. 10448 // If we did this folding here, it would be necessary to update the 10449 // MachineBasicBlock CFG, which is awkward. 10450 10451 // fold a brcond with a setcc condition into a BR_CC node if BR_CC is legal 10452 // on the target. 10453 if (N1.getOpcode() == ISD::SETCC && 10454 TLI.isOperationLegalOrCustom(ISD::BR_CC, 10455 N1.getOperand(0).getValueType())) { 10456 return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other, 10457 Chain, N1.getOperand(2), 10458 N1.getOperand(0), N1.getOperand(1), N2); 10459 } 10460 10461 if ((N1.hasOneUse() && N1.getOpcode() == ISD::SRL) || 10462 ((N1.getOpcode() == ISD::TRUNCATE && N1.hasOneUse()) && 10463 (N1.getOperand(0).hasOneUse() && 10464 N1.getOperand(0).getOpcode() == ISD::SRL))) { 10465 SDNode *Trunc = nullptr; 10466 if (N1.getOpcode() == ISD::TRUNCATE) { 10467 // Look pass the truncate. 10468 Trunc = N1.getNode(); 10469 N1 = N1.getOperand(0); 10470 } 10471 10472 // Match this pattern so that we can generate simpler code: 10473 // 10474 // %a = ... 10475 // %b = and i32 %a, 2 10476 // %c = srl i32 %b, 1 10477 // brcond i32 %c ... 10478 // 10479 // into 10480 // 10481 // %a = ... 10482 // %b = and i32 %a, 2 10483 // %c = setcc eq %b, 0 10484 // brcond %c ... 10485 // 10486 // This applies only when the AND constant value has one bit set and the 10487 // SRL constant is equal to the log2 of the AND constant. The back-end is 10488 // smart enough to convert the result into a TEST/JMP sequence. 10489 SDValue Op0 = N1.getOperand(0); 10490 SDValue Op1 = N1.getOperand(1); 10491 10492 if (Op0.getOpcode() == ISD::AND && 10493 Op1.getOpcode() == ISD::Constant) { 10494 SDValue AndOp1 = Op0.getOperand(1); 10495 10496 if (AndOp1.getOpcode() == ISD::Constant) { 10497 const APInt &AndConst = cast<ConstantSDNode>(AndOp1)->getAPIntValue(); 10498 10499 if (AndConst.isPowerOf2() && 10500 cast<ConstantSDNode>(Op1)->getAPIntValue()==AndConst.logBase2()) { 10501 SDLoc DL(N); 10502 SDValue SetCC = 10503 DAG.getSetCC(DL, 10504 getSetCCResultType(Op0.getValueType()), 10505 Op0, DAG.getConstant(0, DL, Op0.getValueType()), 10506 ISD::SETNE); 10507 10508 SDValue NewBRCond = DAG.getNode(ISD::BRCOND, DL, 10509 MVT::Other, Chain, SetCC, N2); 10510 // Don't add the new BRCond into the worklist or else SimplifySelectCC 10511 // will convert it back to (X & C1) >> C2. 10512 CombineTo(N, NewBRCond, false); 10513 // Truncate is dead. 
10514 if (Trunc) 10515 deleteAndRecombine(Trunc); 10516 // Replace the uses of SRL with SETCC 10517 WorklistRemover DeadNodes(*this); 10518 DAG.ReplaceAllUsesOfValueWith(N1, SetCC); 10519 deleteAndRecombine(N1.getNode()); 10520 return SDValue(N, 0); // Return N so it doesn't get rechecked! 10521 } 10522 } 10523 } 10524 10525 if (Trunc) 10526 // Restore N1 if the above transformation doesn't match. 10527 N1 = N->getOperand(1); 10528 } 10529 10530 // Transform br(xor(x, y)) -> br(x != y) 10531 // Transform br(xor(xor(x,y), 1)) -> br (x == y) 10532 if (N1.hasOneUse() && N1.getOpcode() == ISD::XOR) { 10533 SDNode *TheXor = N1.getNode(); 10534 SDValue Op0 = TheXor->getOperand(0); 10535 SDValue Op1 = TheXor->getOperand(1); 10536 if (Op0.getOpcode() == Op1.getOpcode()) { 10537 // Avoid missing important xor optimizations. 10538 if (SDValue Tmp = visitXOR(TheXor)) { 10539 if (Tmp.getNode() != TheXor) { 10540 DEBUG(dbgs() << "\nReplacing.8 "; 10541 TheXor->dump(&DAG); 10542 dbgs() << "\nWith: "; 10543 Tmp.getNode()->dump(&DAG); 10544 dbgs() << '\n'); 10545 WorklistRemover DeadNodes(*this); 10546 DAG.ReplaceAllUsesOfValueWith(N1, Tmp); 10547 deleteAndRecombine(TheXor); 10548 return DAG.getNode(ISD::BRCOND, SDLoc(N), 10549 MVT::Other, Chain, Tmp, N2); 10550 } 10551 10552 // visitXOR has changed XOR's operands or replaced the XOR completely, 10553 // bail out. 10554 return SDValue(N, 0); 10555 } 10556 } 10557 10558 if (Op0.getOpcode() != ISD::SETCC && Op1.getOpcode() != ISD::SETCC) { 10559 bool Equal = false; 10560 if (isOneConstant(Op0) && Op0.hasOneUse() && 10561 Op0.getOpcode() == ISD::XOR) { 10562 TheXor = Op0.getNode(); 10563 Equal = true; 10564 } 10565 10566 EVT SetCCVT = N1.getValueType(); 10567 if (LegalTypes) 10568 SetCCVT = getSetCCResultType(SetCCVT); 10569 SDValue SetCC = DAG.getSetCC(SDLoc(TheXor), 10570 SetCCVT, 10571 Op0, Op1, 10572 Equal ? ISD::SETEQ : ISD::SETNE); 10573 // Replace the uses of XOR with SETCC 10574 WorklistRemover DeadNodes(*this); 10575 DAG.ReplaceAllUsesOfValueWith(N1, SetCC); 10576 deleteAndRecombine(N1.getNode()); 10577 return DAG.getNode(ISD::BRCOND, SDLoc(N), 10578 MVT::Other, Chain, SetCC, N2); 10579 } 10580 } 10581 10582 return SDValue(); 10583 } 10584 10585 // Operand List for BR_CC: Chain, CondCC, CondLHS, CondRHS, DestBB. 10586 // 10587 SDValue DAGCombiner::visitBR_CC(SDNode *N) { 10588 CondCodeSDNode *CC = cast<CondCodeSDNode>(N->getOperand(1)); 10589 SDValue CondLHS = N->getOperand(2), CondRHS = N->getOperand(3); 10590 10591 // If N is a constant we could fold this into a fallthrough or unconditional 10592 // branch. However that doesn't happen very often in normal code, because 10593 // Instcombine/SimplifyCFG should have handled the available opportunities. 10594 // If we did this folding here, it would be necessary to update the 10595 // MachineBasicBlock CFG, which is awkward. 10596 10597 // Use SimplifySetCC to simplify SETCC's. 
10598 SDValue Simp = SimplifySetCC(getSetCCResultType(CondLHS.getValueType()), 10599 CondLHS, CondRHS, CC->get(), SDLoc(N), 10600 false); 10601 if (Simp.getNode()) AddToWorklist(Simp.getNode()); 10602 10603 // fold to a simpler setcc 10604 if (Simp.getNode() && Simp.getOpcode() == ISD::SETCC) 10605 return DAG.getNode(ISD::BR_CC, SDLoc(N), MVT::Other, 10606 N->getOperand(0), Simp.getOperand(2), 10607 Simp.getOperand(0), Simp.getOperand(1), 10608 N->getOperand(4)); 10609 10610 return SDValue(); 10611 } 10612 10613 /// Return true if 'Use' is a load or a store that uses N as its base pointer 10614 /// and that N may be folded in the load / store addressing mode. 10615 static bool canFoldInAddressingMode(SDNode *N, SDNode *Use, 10616 SelectionDAG &DAG, 10617 const TargetLowering &TLI) { 10618 EVT VT; 10619 unsigned AS; 10620 10621 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Use)) { 10622 if (LD->isIndexed() || LD->getBasePtr().getNode() != N) 10623 return false; 10624 VT = LD->getMemoryVT(); 10625 AS = LD->getAddressSpace(); 10626 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(Use)) { 10627 if (ST->isIndexed() || ST->getBasePtr().getNode() != N) 10628 return false; 10629 VT = ST->getMemoryVT(); 10630 AS = ST->getAddressSpace(); 10631 } else 10632 return false; 10633 10634 TargetLowering::AddrMode AM; 10635 if (N->getOpcode() == ISD::ADD) { 10636 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); 10637 if (Offset) 10638 // [reg +/- imm] 10639 AM.BaseOffs = Offset->getSExtValue(); 10640 else 10641 // [reg +/- reg] 10642 AM.Scale = 1; 10643 } else if (N->getOpcode() == ISD::SUB) { 10644 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); 10645 if (Offset) 10646 // [reg +/- imm] 10647 AM.BaseOffs = -Offset->getSExtValue(); 10648 else 10649 // [reg +/- reg] 10650 AM.Scale = 1; 10651 } else 10652 return false; 10653 10654 return TLI.isLegalAddressingMode(DAG.getDataLayout(), AM, 10655 VT.getTypeForEVT(*DAG.getContext()), AS); 10656 } 10657 10658 /// Try turning a load/store into a pre-indexed load/store when the base 10659 /// pointer is an add or subtract and it has other uses besides the load/store. 10660 /// After the transformation, the new indexed load/store has effectively folded 10661 /// the add/subtract in and all of its other uses are redirected to the 10662 /// new load/store. 10663 bool DAGCombiner::CombineToPreIndexedLoadStore(SDNode *N) { 10664 if (Level < AfterLegalizeDAG) 10665 return false; 10666 10667 bool isLoad = true; 10668 SDValue Ptr; 10669 EVT VT; 10670 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 10671 if (LD->isIndexed()) 10672 return false; 10673 VT = LD->getMemoryVT(); 10674 if (!TLI.isIndexedLoadLegal(ISD::PRE_INC, VT) && 10675 !TLI.isIndexedLoadLegal(ISD::PRE_DEC, VT)) 10676 return false; 10677 Ptr = LD->getBasePtr(); 10678 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 10679 if (ST->isIndexed()) 10680 return false; 10681 VT = ST->getMemoryVT(); 10682 if (!TLI.isIndexedStoreLegal(ISD::PRE_INC, VT) && 10683 !TLI.isIndexedStoreLegal(ISD::PRE_DEC, VT)) 10684 return false; 10685 Ptr = ST->getBasePtr(); 10686 isLoad = false; 10687 } else { 10688 return false; 10689 } 10690 10691 // If the pointer is not an add/sub, or if it doesn't have multiple uses, bail 10692 // out. There is no reason to make this a preinc/predec. 10693 if ((Ptr.getOpcode() != ISD::ADD && Ptr.getOpcode() != ISD::SUB) || 10694 Ptr.getNode()->hasOneUse()) 10695 return false; 10696 10697 // Ask the target to do addressing mode selection. 
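// If the target supports a pre-indexed form for this access, it splits Ptr into
// a base and an offset and reports whether the update is an increment
// (ISD::PRE_INC) or a decrement (ISD::PRE_DEC).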
10698 SDValue BasePtr; 10699 SDValue Offset; 10700 ISD::MemIndexedMode AM = ISD::UNINDEXED; 10701 if (!TLI.getPreIndexedAddressParts(N, BasePtr, Offset, AM, DAG)) 10702 return false; 10703 10704 // Backends without true r+i pre-indexed forms may need to pass a 10705 // constant base with a variable offset so that constant coercion 10706 // will work with the patterns in canonical form. 10707 bool Swapped = false; 10708 if (isa<ConstantSDNode>(BasePtr)) { 10709 std::swap(BasePtr, Offset); 10710 Swapped = true; 10711 } 10712 10713 // Don't create a indexed load / store with zero offset. 10714 if (isNullConstant(Offset)) 10715 return false; 10716 10717 // Try turning it into a pre-indexed load / store except when: 10718 // 1) The new base ptr is a frame index. 10719 // 2) If N is a store and the new base ptr is either the same as or is a 10720 // predecessor of the value being stored. 10721 // 3) Another use of old base ptr is a predecessor of N. If ptr is folded 10722 // that would create a cycle. 10723 // 4) All uses are load / store ops that use it as old base ptr. 10724 10725 // Check #1. Preinc'ing a frame index would require copying the stack pointer 10726 // (plus the implicit offset) to a register to preinc anyway. 10727 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr)) 10728 return false; 10729 10730 // Check #2. 10731 if (!isLoad) { 10732 SDValue Val = cast<StoreSDNode>(N)->getValue(); 10733 if (Val == BasePtr || BasePtr.getNode()->isPredecessorOf(Val.getNode())) 10734 return false; 10735 } 10736 10737 // Caches for hasPredecessorHelper. 10738 SmallPtrSet<const SDNode *, 32> Visited; 10739 SmallVector<const SDNode *, 16> Worklist; 10740 Worklist.push_back(N); 10741 10742 // If the offset is a constant, there may be other adds of constants that 10743 // can be folded with this one. We should do this to avoid having to keep 10744 // a copy of the original base pointer. 10745 SmallVector<SDNode *, 16> OtherUses; 10746 if (isa<ConstantSDNode>(Offset)) 10747 for (SDNode::use_iterator UI = BasePtr.getNode()->use_begin(), 10748 UE = BasePtr.getNode()->use_end(); 10749 UI != UE; ++UI) { 10750 SDUse &Use = UI.getUse(); 10751 // Skip the use that is Ptr and uses of other results from BasePtr's 10752 // node (important for nodes that return multiple results). 10753 if (Use.getUser() == Ptr.getNode() || Use != BasePtr) 10754 continue; 10755 10756 if (SDNode::hasPredecessorHelper(Use.getUser(), Visited, Worklist)) 10757 continue; 10758 10759 if (Use.getUser()->getOpcode() != ISD::ADD && 10760 Use.getUser()->getOpcode() != ISD::SUB) { 10761 OtherUses.clear(); 10762 break; 10763 } 10764 10765 SDValue Op1 = Use.getUser()->getOperand((UI.getOperandNo() + 1) & 1); 10766 if (!isa<ConstantSDNode>(Op1)) { 10767 OtherUses.clear(); 10768 break; 10769 } 10770 10771 // FIXME: In some cases, we can be smarter about this. 10772 if (Op1.getValueType() != Offset.getValueType()) { 10773 OtherUses.clear(); 10774 break; 10775 } 10776 10777 OtherUses.push_back(Use.getUser()); 10778 } 10779 10780 if (Swapped) 10781 std::swap(BasePtr, Offset); 10782 10783 // Now check for #3 and #4. 10784 bool RealUse = false; 10785 10786 for (SDNode *Use : Ptr.getNode()->uses()) { 10787 if (Use == N) 10788 continue; 10789 if (SDNode::hasPredecessorHelper(Use, Visited, Worklist)) 10790 return false; 10791 10792 // If Ptr may be folded in addressing mode of other use, then it's 10793 // not profitable to do this transformation. 
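    // That is, we only pre-index if at least one other use of Ptr really needs
    // Ptr materialized as a separate ADD/SUB; if every other use can fold Ptr
    // into its own addressing mode, the pointer arithmetic is already free and
    // the transformation would not save anything.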
10794 if (!canFoldInAddressingMode(Ptr.getNode(), Use, DAG, TLI)) 10795 RealUse = true; 10796 } 10797 10798 if (!RealUse) 10799 return false; 10800 10801 SDValue Result; 10802 if (isLoad) 10803 Result = DAG.getIndexedLoad(SDValue(N,0), SDLoc(N), 10804 BasePtr, Offset, AM); 10805 else 10806 Result = DAG.getIndexedStore(SDValue(N,0), SDLoc(N), 10807 BasePtr, Offset, AM); 10808 ++PreIndexedNodes; 10809 ++NodesCombined; 10810 DEBUG(dbgs() << "\nReplacing.4 "; 10811 N->dump(&DAG); 10812 dbgs() << "\nWith: "; 10813 Result.getNode()->dump(&DAG); 10814 dbgs() << '\n'); 10815 WorklistRemover DeadNodes(*this); 10816 if (isLoad) { 10817 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0)); 10818 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2)); 10819 } else { 10820 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1)); 10821 } 10822 10823 // Finally, since the node is now dead, remove it from the graph. 10824 deleteAndRecombine(N); 10825 10826 if (Swapped) 10827 std::swap(BasePtr, Offset); 10828 10829 // Replace other uses of BasePtr that can be updated to use Ptr 10830 for (unsigned i = 0, e = OtherUses.size(); i != e; ++i) { 10831 unsigned OffsetIdx = 1; 10832 if (OtherUses[i]->getOperand(OffsetIdx).getNode() == BasePtr.getNode()) 10833 OffsetIdx = 0; 10834 assert(OtherUses[i]->getOperand(!OffsetIdx).getNode() == 10835 BasePtr.getNode() && "Expected BasePtr operand"); 10836 10837 // We need to replace ptr0 in the following expression: 10838 // x0 * offset0 + y0 * ptr0 = t0 10839 // knowing that 10840 // x1 * offset1 + y1 * ptr0 = t1 (the indexed load/store) 10841 // 10842 // where x0, x1, y0 and y1 in {-1, 1} are given by the types of the 10843 // indexed load/store and the expresion that needs to be re-written. 10844 // 10845 // Therefore, we have: 10846 // t0 = (x0 * offset0 - x1 * y0 * y1 *offset1) + (y0 * y1) * t1 10847 10848 ConstantSDNode *CN = 10849 cast<ConstantSDNode>(OtherUses[i]->getOperand(OffsetIdx)); 10850 int X0, X1, Y0, Y1; 10851 const APInt &Offset0 = CN->getAPIntValue(); 10852 APInt Offset1 = cast<ConstantSDNode>(Offset)->getAPIntValue(); 10853 10854 X0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 1) ? -1 : 1; 10855 Y0 = (OtherUses[i]->getOpcode() == ISD::SUB && OffsetIdx == 0) ? -1 : 1; 10856 X1 = (AM == ISD::PRE_DEC && !Swapped) ? -1 : 1; 10857 Y1 = (AM == ISD::PRE_DEC && Swapped) ? -1 : 1; 10858 10859 unsigned Opcode = (Y0 * Y1 < 0) ? ISD::SUB : ISD::ADD; 10860 10861 APInt CNV = Offset0; 10862 if (X0 < 0) CNV = -CNV; 10863 if (X1 * Y0 * Y1 < 0) CNV = CNV + Offset1; 10864 else CNV = CNV - Offset1; 10865 10866 SDLoc DL(OtherUses[i]); 10867 10868 // We can now generate the new expression. 10869 SDValue NewOp1 = DAG.getConstant(CNV, DL, CN->getValueType(0)); 10870 SDValue NewOp2 = Result.getValue(isLoad ? 1 : 0); 10871 10872 SDValue NewUse = DAG.getNode(Opcode, 10873 DL, 10874 OtherUses[i]->getValueType(0), NewOp1, NewOp2); 10875 DAG.ReplaceAllUsesOfValueWith(SDValue(OtherUses[i], 0), NewUse); 10876 deleteAndRecombine(OtherUses[i]); 10877 } 10878 10879 // Replace the uses of Ptr with uses of the updated base value. 10880 DAG.ReplaceAllUsesOfValueWith(Ptr, Result.getValue(isLoad ? 1 : 0)); 10881 deleteAndRecombine(Ptr.getNode()); 10882 10883 return true; 10884 } 10885 10886 /// Try to combine a load/store with a add/sub of the base pointer node into a 10887 /// post-indexed load/store. 
The transformation folded the add/subtract into the 10888 /// new indexed load/store effectively and all of its uses are redirected to the 10889 /// new load/store. 10890 bool DAGCombiner::CombineToPostIndexedLoadStore(SDNode *N) { 10891 if (Level < AfterLegalizeDAG) 10892 return false; 10893 10894 bool isLoad = true; 10895 SDValue Ptr; 10896 EVT VT; 10897 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 10898 if (LD->isIndexed()) 10899 return false; 10900 VT = LD->getMemoryVT(); 10901 if (!TLI.isIndexedLoadLegal(ISD::POST_INC, VT) && 10902 !TLI.isIndexedLoadLegal(ISD::POST_DEC, VT)) 10903 return false; 10904 Ptr = LD->getBasePtr(); 10905 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 10906 if (ST->isIndexed()) 10907 return false; 10908 VT = ST->getMemoryVT(); 10909 if (!TLI.isIndexedStoreLegal(ISD::POST_INC, VT) && 10910 !TLI.isIndexedStoreLegal(ISD::POST_DEC, VT)) 10911 return false; 10912 Ptr = ST->getBasePtr(); 10913 isLoad = false; 10914 } else { 10915 return false; 10916 } 10917 10918 if (Ptr.getNode()->hasOneUse()) 10919 return false; 10920 10921 for (SDNode *Op : Ptr.getNode()->uses()) { 10922 if (Op == N || 10923 (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)) 10924 continue; 10925 10926 SDValue BasePtr; 10927 SDValue Offset; 10928 ISD::MemIndexedMode AM = ISD::UNINDEXED; 10929 if (TLI.getPostIndexedAddressParts(N, Op, BasePtr, Offset, AM, DAG)) { 10930 // Don't create a indexed load / store with zero offset. 10931 if (isNullConstant(Offset)) 10932 continue; 10933 10934 // Try turning it into a post-indexed load / store except when 10935 // 1) All uses are load / store ops that use it as base ptr (and 10936 // it may be folded as addressing mmode). 10937 // 2) Op must be independent of N, i.e. Op is neither a predecessor 10938 // nor a successor of N. Otherwise, if Op is folded that would 10939 // create a cycle. 10940 10941 if (isa<FrameIndexSDNode>(BasePtr) || isa<RegisterSDNode>(BasePtr)) 10942 continue; 10943 10944 // Check for #1. 10945 bool TryNext = false; 10946 for (SDNode *Use : BasePtr.getNode()->uses()) { 10947 if (Use == Ptr.getNode()) 10948 continue; 10949 10950 // If all the uses are load / store addresses, then don't do the 10951 // transformation. 10952 if (Use->getOpcode() == ISD::ADD || Use->getOpcode() == ISD::SUB){ 10953 bool RealUse = false; 10954 for (SDNode *UseUse : Use->uses()) { 10955 if (!canFoldInAddressingMode(Use, UseUse, DAG, TLI)) 10956 RealUse = true; 10957 } 10958 10959 if (!RealUse) { 10960 TryNext = true; 10961 break; 10962 } 10963 } 10964 } 10965 10966 if (TryNext) 10967 continue; 10968 10969 // Check for #2 10970 if (!Op->isPredecessorOf(N) && !N->isPredecessorOf(Op)) { 10971 SDValue Result = isLoad 10972 ? DAG.getIndexedLoad(SDValue(N,0), SDLoc(N), 10973 BasePtr, Offset, AM) 10974 : DAG.getIndexedStore(SDValue(N,0), SDLoc(N), 10975 BasePtr, Offset, AM); 10976 ++PostIndexedNodes; 10977 ++NodesCombined; 10978 DEBUG(dbgs() << "\nReplacing.5 "; 10979 N->dump(&DAG); 10980 dbgs() << "\nWith: "; 10981 Result.getNode()->dump(&DAG); 10982 dbgs() << '\n'); 10983 WorklistRemover DeadNodes(*this); 10984 if (isLoad) { 10985 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(0)); 10986 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Result.getValue(2)); 10987 } else { 10988 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Result.getValue(1)); 10989 } 10990 10991 // Finally, since the node is now dead, remove it from the graph. 10992 deleteAndRecombine(N); 10993 10994 // Replace the uses of Use with uses of the updated base value. 
10995 DAG.ReplaceAllUsesOfValueWith(SDValue(Op, 0), 10996 Result.getValue(isLoad ? 1 : 0)); 10997 deleteAndRecombine(Op); 10998 return true; 10999 } 11000 } 11001 } 11002 11003 return false; 11004 } 11005 11006 /// \brief Return the base-pointer arithmetic from an indexed \p LD. 11007 SDValue DAGCombiner::SplitIndexingFromLoad(LoadSDNode *LD) { 11008 ISD::MemIndexedMode AM = LD->getAddressingMode(); 11009 assert(AM != ISD::UNINDEXED); 11010 SDValue BP = LD->getOperand(1); 11011 SDValue Inc = LD->getOperand(2); 11012 11013 // Some backends use TargetConstants for load offsets, but don't expect 11014 // TargetConstants in general ADD nodes. We can convert these constants into 11015 // regular Constants (if the constant is not opaque). 11016 assert((Inc.getOpcode() != ISD::TargetConstant || 11017 !cast<ConstantSDNode>(Inc)->isOpaque()) && 11018 "Cannot split out indexing using opaque target constants"); 11019 if (Inc.getOpcode() == ISD::TargetConstant) { 11020 ConstantSDNode *ConstInc = cast<ConstantSDNode>(Inc); 11021 Inc = DAG.getConstant(*ConstInc->getConstantIntValue(), SDLoc(Inc), 11022 ConstInc->getValueType(0)); 11023 } 11024 11025 unsigned Opc = 11026 (AM == ISD::PRE_INC || AM == ISD::POST_INC ? ISD::ADD : ISD::SUB); 11027 return DAG.getNode(Opc, SDLoc(LD), BP.getSimpleValueType(), BP, Inc); 11028 } 11029 11030 SDValue DAGCombiner::visitLOAD(SDNode *N) { 11031 LoadSDNode *LD = cast<LoadSDNode>(N); 11032 SDValue Chain = LD->getChain(); 11033 SDValue Ptr = LD->getBasePtr(); 11034 11035 // If load is not volatile and there are no uses of the loaded value (and 11036 // the updated indexed value in case of indexed loads), change uses of the 11037 // chain value into uses of the chain input (i.e. delete the dead load). 11038 if (!LD->isVolatile()) { 11039 if (N->getValueType(1) == MVT::Other) { 11040 // Unindexed loads. 11041 if (!N->hasAnyUseOfValue(0)) { 11042 // It's not safe to use the two value CombineTo variant here. e.g. 11043 // v1, chain2 = load chain1, loc 11044 // v2, chain3 = load chain2, loc 11045 // v3 = add v2, c 11046 // Now we replace use of chain2 with chain1. This makes the second load 11047 // isomorphic to the one we are deleting, and thus makes this load live. 11048 DEBUG(dbgs() << "\nReplacing.6 "; 11049 N->dump(&DAG); 11050 dbgs() << "\nWith chain: "; 11051 Chain.getNode()->dump(&DAG); 11052 dbgs() << "\n"); 11053 WorklistRemover DeadNodes(*this); 11054 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain); 11055 AddUsersToWorklist(Chain.getNode()); 11056 if (N->use_empty()) 11057 deleteAndRecombine(N); 11058 11059 return SDValue(N, 0); // Return N so it doesn't get rechecked! 11060 } 11061 } else { 11062 // Indexed loads. 11063 assert(N->getValueType(2) == MVT::Other && "Malformed indexed loads?"); 11064 11065 // If this load has an opaque TargetConstant offset, then we cannot split 11066 // the indexing into an add/sub directly (that TargetConstant may not be 11067 // valid for a different type of node, and we cannot convert an opaque 11068 // target constant into a regular constant). 
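      // When the loaded value is dead but the incremented pointer result still
      // has uses, that pointer result is replaced below with an equivalent
      // ADD/SUB of the base and offset (see SplitIndexingFromLoad above), which
      // lets the address arithmetic survive the deletion of the load itself.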
11069 bool HasOTCInc = LD->getOperand(2).getOpcode() == ISD::TargetConstant && 11070 cast<ConstantSDNode>(LD->getOperand(2))->isOpaque(); 11071 11072 if (!N->hasAnyUseOfValue(0) && 11073 ((MaySplitLoadIndex && !HasOTCInc) || !N->hasAnyUseOfValue(1))) { 11074 SDValue Undef = DAG.getUNDEF(N->getValueType(0)); 11075 SDValue Index; 11076 if (N->hasAnyUseOfValue(1) && MaySplitLoadIndex && !HasOTCInc) { 11077 Index = SplitIndexingFromLoad(LD); 11078 // Try to fold the base pointer arithmetic into subsequent loads and 11079 // stores. 11080 AddUsersToWorklist(N); 11081 } else 11082 Index = DAG.getUNDEF(N->getValueType(1)); 11083 DEBUG(dbgs() << "\nReplacing.7 "; 11084 N->dump(&DAG); 11085 dbgs() << "\nWith: "; 11086 Undef.getNode()->dump(&DAG); 11087 dbgs() << " and 2 other values\n"); 11088 WorklistRemover DeadNodes(*this); 11089 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Undef); 11090 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Index); 11091 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 2), Chain); 11092 deleteAndRecombine(N); 11093 return SDValue(N, 0); // Return N so it doesn't get rechecked! 11094 } 11095 } 11096 } 11097 11098 // If this load is directly stored, replace the load value with the stored 11099 // value. 11100 // TODO: Handle store large -> read small portion. 11101 // TODO: Handle TRUNCSTORE/LOADEXT 11102 if (OptLevel != CodeGenOpt::None && 11103 ISD::isNormalLoad(N) && !LD->isVolatile()) { 11104 if (ISD::isNON_TRUNCStore(Chain.getNode())) { 11105 StoreSDNode *PrevST = cast<StoreSDNode>(Chain); 11106 if (PrevST->getBasePtr() == Ptr && 11107 PrevST->getValue().getValueType() == N->getValueType(0)) 11108 return CombineTo(N, PrevST->getOperand(1), Chain); 11109 } 11110 } 11111 11112 // Try to infer better alignment information than the load already has. 11113 if (OptLevel != CodeGenOpt::None && LD->isUnindexed()) { 11114 if (unsigned Align = DAG.InferPtrAlignment(Ptr)) { 11115 if (Align > LD->getMemOperand()->getBaseAlignment()) { 11116 SDValue NewLoad = DAG.getExtLoad( 11117 LD->getExtensionType(), SDLoc(N), LD->getValueType(0), Chain, Ptr, 11118 LD->getPointerInfo(), LD->getMemoryVT(), Align, 11119 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 11120 if (NewLoad.getNode() != N) 11121 return CombineTo(N, NewLoad, SDValue(NewLoad.getNode(), 1), true); 11122 } 11123 } 11124 } 11125 11126 if (LD->isUnindexed()) { 11127 // Walk up chain skipping non-aliasing memory nodes. 11128 SDValue BetterChain = FindBetterChain(N, Chain); 11129 11130 // If there is a better chain. 11131 if (Chain != BetterChain) { 11132 SDValue ReplLoad; 11133 11134 // Replace the chain to void dependency. 11135 if (LD->getExtensionType() == ISD::NON_EXTLOAD) { 11136 ReplLoad = DAG.getLoad(N->getValueType(0), SDLoc(LD), 11137 BetterChain, Ptr, LD->getMemOperand()); 11138 } else { 11139 ReplLoad = DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), 11140 LD->getValueType(0), 11141 BetterChain, Ptr, LD->getMemoryVT(), 11142 LD->getMemOperand()); 11143 } 11144 11145 // Create token factor to keep old chain connected. 11146 SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N), 11147 MVT::Other, Chain, ReplLoad.getValue(1)); 11148 11149 // Make sure the new and old chains are cleaned up. 11150 AddToWorklist(Token.getNode()); 11151 11152 // Replace uses with load result and token factor. Don't add users 11153 // to work list. 11154 return CombineTo(N, ReplLoad.getValue(0), Token, false); 11155 } 11156 } 11157 11158 // Try transforming N to an indexed load. 
11159 if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N)) 11160 return SDValue(N, 0); 11161 11162 // Try to slice up N to more direct loads if the slices are mapped to 11163 // different register banks or pairing can take place. 11164 if (SliceUpLoad(N)) 11165 return SDValue(N, 0); 11166 11167 return SDValue(); 11168 } 11169 11170 namespace { 11171 /// \brief Helper structure used to slice a load in smaller loads. 11172 /// Basically a slice is obtained from the following sequence: 11173 /// Origin = load Ty1, Base 11174 /// Shift = srl Ty1 Origin, CstTy Amount 11175 /// Inst = trunc Shift to Ty2 11176 /// 11177 /// Then, it will be rewriten into: 11178 /// Slice = load SliceTy, Base + SliceOffset 11179 /// [Inst = zext Slice to Ty2], only if SliceTy <> Ty2 11180 /// 11181 /// SliceTy is deduced from the number of bits that are actually used to 11182 /// build Inst. 11183 struct LoadedSlice { 11184 /// \brief Helper structure used to compute the cost of a slice. 11185 struct Cost { 11186 /// Are we optimizing for code size. 11187 bool ForCodeSize; 11188 /// Various cost. 11189 unsigned Loads; 11190 unsigned Truncates; 11191 unsigned CrossRegisterBanksCopies; 11192 unsigned ZExts; 11193 unsigned Shift; 11194 11195 Cost(bool ForCodeSize = false) 11196 : ForCodeSize(ForCodeSize), Loads(0), Truncates(0), 11197 CrossRegisterBanksCopies(0), ZExts(0), Shift(0) {} 11198 11199 /// \brief Get the cost of one isolated slice. 11200 Cost(const LoadedSlice &LS, bool ForCodeSize = false) 11201 : ForCodeSize(ForCodeSize), Loads(1), Truncates(0), 11202 CrossRegisterBanksCopies(0), ZExts(0), Shift(0) { 11203 EVT TruncType = LS.Inst->getValueType(0); 11204 EVT LoadedType = LS.getLoadedType(); 11205 if (TruncType != LoadedType && 11206 !LS.DAG->getTargetLoweringInfo().isZExtFree(LoadedType, TruncType)) 11207 ZExts = 1; 11208 } 11209 11210 /// \brief Account for slicing gain in the current cost. 11211 /// Slicing provide a few gains like removing a shift or a 11212 /// truncate. This method allows to grow the cost of the original 11213 /// load with the gain from this slice. 11214 void addSliceGain(const LoadedSlice &LS) { 11215 // Each slice saves a truncate. 11216 const TargetLowering &TLI = LS.DAG->getTargetLoweringInfo(); 11217 if (!TLI.isTruncateFree(LS.Inst->getOperand(0).getValueType(), 11218 LS.Inst->getValueType(0))) 11219 ++Truncates; 11220 // If there is a shift amount, this slice gets rid of it. 11221 if (LS.Shift) 11222 ++Shift; 11223 // If this slice can merge a cross register bank copy, account for it. 11224 if (LS.canMergeExpensiveCrossRegisterBankCopy()) 11225 ++CrossRegisterBanksCopies; 11226 } 11227 11228 Cost &operator+=(const Cost &RHS) { 11229 Loads += RHS.Loads; 11230 Truncates += RHS.Truncates; 11231 CrossRegisterBanksCopies += RHS.CrossRegisterBanksCopies; 11232 ZExts += RHS.ZExts; 11233 Shift += RHS.Shift; 11234 return *this; 11235 } 11236 11237 bool operator==(const Cost &RHS) const { 11238 return Loads == RHS.Loads && Truncates == RHS.Truncates && 11239 CrossRegisterBanksCopies == RHS.CrossRegisterBanksCopies && 11240 ZExts == RHS.ZExts && Shift == RHS.Shift; 11241 } 11242 11243 bool operator!=(const Cost &RHS) const { return !(*this == RHS); } 11244 11245 bool operator<(const Cost &RHS) const { 11246 // Assume cross register banks copies are as expensive as loads. 11247 // FIXME: Do we want some more target hooks? 
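      // A small illustration of the ordering (not a case from this file):
      // unless optimizing for size, {1 load, 2 zexts} compares as cheaper than
      // {2 loads, 0 zexts}, because the expensive-operation counts (loads plus
      // cross-register-bank copies) are compared first; the cheaper operations
      // are only compared when the expensive counts tie.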
11248 unsigned ExpensiveOpsLHS = Loads + CrossRegisterBanksCopies; 11249 unsigned ExpensiveOpsRHS = RHS.Loads + RHS.CrossRegisterBanksCopies; 11250 // Unless we are optimizing for code size, consider the 11251 // expensive operation first. 11252 if (!ForCodeSize && ExpensiveOpsLHS != ExpensiveOpsRHS) 11253 return ExpensiveOpsLHS < ExpensiveOpsRHS; 11254 return (Truncates + ZExts + Shift + ExpensiveOpsLHS) < 11255 (RHS.Truncates + RHS.ZExts + RHS.Shift + ExpensiveOpsRHS); 11256 } 11257 11258 bool operator>(const Cost &RHS) const { return RHS < *this; } 11259 11260 bool operator<=(const Cost &RHS) const { return !(RHS < *this); } 11261 11262 bool operator>=(const Cost &RHS) const { return !(*this < RHS); } 11263 }; 11264 // The last instruction that represent the slice. This should be a 11265 // truncate instruction. 11266 SDNode *Inst; 11267 // The original load instruction. 11268 LoadSDNode *Origin; 11269 // The right shift amount in bits from the original load. 11270 unsigned Shift; 11271 // The DAG from which Origin came from. 11272 // This is used to get some contextual information about legal types, etc. 11273 SelectionDAG *DAG; 11274 11275 LoadedSlice(SDNode *Inst = nullptr, LoadSDNode *Origin = nullptr, 11276 unsigned Shift = 0, SelectionDAG *DAG = nullptr) 11277 : Inst(Inst), Origin(Origin), Shift(Shift), DAG(DAG) {} 11278 11279 /// \brief Get the bits used in a chunk of bits \p BitWidth large. 11280 /// \return Result is \p BitWidth and has used bits set to 1 and 11281 /// not used bits set to 0. 11282 APInt getUsedBits() const { 11283 // Reproduce the trunc(lshr) sequence: 11284 // - Start from the truncated value. 11285 // - Zero extend to the desired bit width. 11286 // - Shift left. 11287 assert(Origin && "No original load to compare against."); 11288 unsigned BitWidth = Origin->getValueSizeInBits(0); 11289 assert(Inst && "This slice is not bound to an instruction"); 11290 assert(Inst->getValueSizeInBits(0) <= BitWidth && 11291 "Extracted slice is bigger than the whole type!"); 11292 APInt UsedBits(Inst->getValueSizeInBits(0), 0); 11293 UsedBits.setAllBits(); 11294 UsedBits = UsedBits.zext(BitWidth); 11295 UsedBits <<= Shift; 11296 return UsedBits; 11297 } 11298 11299 /// \brief Get the size of the slice to be loaded in bytes. 11300 unsigned getLoadedSize() const { 11301 unsigned SliceSize = getUsedBits().countPopulation(); 11302 assert(!(SliceSize & 0x7) && "Size is not a multiple of a byte."); 11303 return SliceSize / 8; 11304 } 11305 11306 /// \brief Get the type that will be loaded for this slice. 11307 /// Note: This may not be the final type for the slice. 11308 EVT getLoadedType() const { 11309 assert(DAG && "Missing context"); 11310 LLVMContext &Ctxt = *DAG->getContext(); 11311 return EVT::getIntegerVT(Ctxt, getLoadedSize() * 8); 11312 } 11313 11314 /// \brief Get the alignment of the load used for this slice. 11315 unsigned getAlignment() const { 11316 unsigned Alignment = Origin->getAlignment(); 11317 unsigned Offset = getOffsetFromBase(); 11318 if (Offset != 0) 11319 Alignment = MinAlign(Alignment, Alignment + Offset); 11320 return Alignment; 11321 } 11322 11323 /// \brief Check if this slice can be rewritten with legal operations. 11324 bool isLegal() const { 11325 // An invalid slice is not legal. 11326 if (!Origin || !Inst || !DAG) 11327 return false; 11328 11329 // Offsets are for indexed load only, we do not handle that. 
11330 if (!Origin->getOffset().isUndef()) 11331 return false; 11332 11333 const TargetLowering &TLI = DAG->getTargetLoweringInfo(); 11334 11335 // Check that the type is legal. 11336 EVT SliceType = getLoadedType(); 11337 if (!TLI.isTypeLegal(SliceType)) 11338 return false; 11339 11340 // Check that the load is legal for this type. 11341 if (!TLI.isOperationLegal(ISD::LOAD, SliceType)) 11342 return false; 11343 11344 // Check that the offset can be computed. 11345 // 1. Check its type. 11346 EVT PtrType = Origin->getBasePtr().getValueType(); 11347 if (PtrType == MVT::Untyped || PtrType.isExtended()) 11348 return false; 11349 11350 // 2. Check that it fits in the immediate. 11351 if (!TLI.isLegalAddImmediate(getOffsetFromBase())) 11352 return false; 11353 11354 // 3. Check that the computation is legal. 11355 if (!TLI.isOperationLegal(ISD::ADD, PtrType)) 11356 return false; 11357 11358 // Check that the zext is legal if it needs one. 11359 EVT TruncateType = Inst->getValueType(0); 11360 if (TruncateType != SliceType && 11361 !TLI.isOperationLegal(ISD::ZERO_EXTEND, TruncateType)) 11362 return false; 11363 11364 return true; 11365 } 11366 11367 /// \brief Get the offset in bytes of this slice in the original chunk of 11368 /// bits. 11369 /// \pre DAG != nullptr. 11370 uint64_t getOffsetFromBase() const { 11371 assert(DAG && "Missing context."); 11372 bool IsBigEndian = DAG->getDataLayout().isBigEndian(); 11373 assert(!(Shift & 0x7) && "Shifts not aligned on Bytes are not supported."); 11374 uint64_t Offset = Shift / 8; 11375 unsigned TySizeInBytes = Origin->getValueSizeInBits(0) / 8; 11376 assert(!(Origin->getValueSizeInBits(0) & 0x7) && 11377 "The size of the original loaded type is not a multiple of a" 11378 " byte."); 11379 // If Offset is bigger than TySizeInBytes, it means we are loading all 11380 // zeros. This should have been optimized before in the process. 11381 assert(TySizeInBytes > Offset && 11382 "Invalid shift amount for given loaded size"); 11383 if (IsBigEndian) 11384 Offset = TySizeInBytes - Offset - getLoadedSize(); 11385 return Offset; 11386 } 11387 11388 /// \brief Generate the sequence of instructions to load the slice 11389 /// represented by this object and redirect the uses of this slice to 11390 /// this new sequence of instructions. 11391 /// \pre this->Inst && this->Origin are valid Instructions and this 11392 /// object passed the legal check: LoadedSlice::isLegal returned true. 11393 /// \return The last instruction of the sequence used to load the slice. 11394 SDValue loadSlice() const { 11395 assert(Inst && Origin && "Unable to replace a non-existing slice."); 11396 const SDValue &OldBaseAddr = Origin->getBasePtr(); 11397 SDValue BaseAddr = OldBaseAddr; 11398 // Get the offset in that chunk of bytes w.r.t. the endianness. 11399 int64_t Offset = static_cast<int64_t>(getOffsetFromBase()); 11400 assert(Offset >= 0 && "Offset too big to fit in int64_t!"); 11401 if (Offset) { 11402 // BaseAddr = BaseAddr + Offset. 11403 EVT ArithType = BaseAddr.getValueType(); 11404 SDLoc DL(Origin); 11405 BaseAddr = DAG->getNode(ISD::ADD, DL, ArithType, BaseAddr, 11406 DAG->getConstant(Offset, DL, ArithType)); 11407 } 11408 11409 // Create the type of the loaded slice according to its size. 11410 EVT SliceType = getLoadedType(); 11411 11412 // Create the load for the slice. 
11413 SDValue LastInst = 11414 DAG->getLoad(SliceType, SDLoc(Origin), Origin->getChain(), BaseAddr, 11415 Origin->getPointerInfo().getWithOffset(Offset), 11416 getAlignment(), Origin->getMemOperand()->getFlags()); 11417 // If the final type is not the same as the loaded type, this means that 11418 // we have to pad with zero. Create a zero extend for that. 11419 EVT FinalType = Inst->getValueType(0); 11420 if (SliceType != FinalType) 11421 LastInst = 11422 DAG->getNode(ISD::ZERO_EXTEND, SDLoc(LastInst), FinalType, LastInst); 11423 return LastInst; 11424 } 11425 11426 /// \brief Check if this slice can be merged with an expensive cross register 11427 /// bank copy. E.g., 11428 /// i = load i32 11429 /// f = bitcast i32 i to float 11430 bool canMergeExpensiveCrossRegisterBankCopy() const { 11431 if (!Inst || !Inst->hasOneUse()) 11432 return false; 11433 SDNode *Use = *Inst->use_begin(); 11434 if (Use->getOpcode() != ISD::BITCAST) 11435 return false; 11436 assert(DAG && "Missing context"); 11437 const TargetLowering &TLI = DAG->getTargetLoweringInfo(); 11438 EVT ResVT = Use->getValueType(0); 11439 const TargetRegisterClass *ResRC = TLI.getRegClassFor(ResVT.getSimpleVT()); 11440 const TargetRegisterClass *ArgRC = 11441 TLI.getRegClassFor(Use->getOperand(0).getValueType().getSimpleVT()); 11442 if (ArgRC == ResRC || !TLI.isOperationLegal(ISD::LOAD, ResVT)) 11443 return false; 11444 11445 // At this point, we know that we perform a cross-register-bank copy. 11446 // Check if it is expensive. 11447 const TargetRegisterInfo *TRI = DAG->getSubtarget().getRegisterInfo(); 11448 // Assume bitcasts are cheap, unless both register classes do not 11449 // explicitly share a common sub class. 11450 if (!TRI || TRI->getCommonSubClass(ArgRC, ResRC)) 11451 return false; 11452 11453 // Check if it will be merged with the load. 11454 // 1. Check the alignment constraint. 11455 unsigned RequiredAlignment = DAG->getDataLayout().getABITypeAlignment( 11456 ResVT.getTypeForEVT(*DAG->getContext())); 11457 11458 if (RequiredAlignment > getAlignment()) 11459 return false; 11460 11461 // 2. Check that the load is a legal operation for that type. 11462 if (!TLI.isOperationLegal(ISD::LOAD, ResVT)) 11463 return false; 11464 11465 // 3. Check that we do not have a zext in the way. 11466 if (Inst->getValueType(0) != getLoadedType()) 11467 return false; 11468 11469 return true; 11470 } 11471 }; 11472 } 11473 11474 /// \brief Check that all bits set in \p UsedBits form a dense region, i.e., 11475 /// \p UsedBits looks like 0..0 1..1 0..0. 11476 static bool areUsedBitsDense(const APInt &UsedBits) { 11477 // If all the bits are one, this is dense! 11478 if (UsedBits.isAllOnesValue()) 11479 return true; 11480 11481 // Get rid of the unused bits on the right. 11482 APInt NarrowedUsedBits = UsedBits.lshr(UsedBits.countTrailingZeros()); 11483 // Get rid of the unused bits on the left. 11484 if (NarrowedUsedBits.countLeadingZeros()) 11485 NarrowedUsedBits = NarrowedUsedBits.trunc(NarrowedUsedBits.getActiveBits()); 11486 // Check that the chunk of bits is completely used. 11487 return NarrowedUsedBits.isAllOnesValue(); 11488 } 11489 11490 /// \brief Check whether or not \p First and \p Second are next to each other 11491 /// in memory. This means that there is no hole between the bits loaded 11492 /// by \p First and the bits loaded by \p Second. 
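/// For instance (purely illustrative), for a 32-bit origin load, slices
/// covering bits [0,15] and [16,31] are next to each other, whereas slices
/// covering bits [0,7] and [16,23] are not, since bits [8,15] would be loaded
/// by neither slice.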
11493 static bool areSlicesNextToEachOther(const LoadedSlice &First,
11494                                      const LoadedSlice &Second) {
11495   assert(First.Origin == Second.Origin && First.Origin &&
11496          "Unable to match different memory origins.");
11497   APInt UsedBits = First.getUsedBits();
11498   assert((UsedBits & Second.getUsedBits()) == 0 &&
11499          "Slices are not supposed to overlap.");
11500   UsedBits |= Second.getUsedBits();
11501   return areUsedBitsDense(UsedBits);
11502 }
11503 
11504 /// \brief Adjust the \p GlobalLSCost according to the target
11505 /// pairing capabilities and the layout of the slices.
11506 /// \pre \p GlobalLSCost should account for at least as many loads as
11507 /// there are in the slices in \p LoadedSlices.
11508 static void adjustCostForPairing(SmallVectorImpl<LoadedSlice> &LoadedSlices,
11509                                  LoadedSlice::Cost &GlobalLSCost) {
11510   unsigned NumberOfSlices = LoadedSlices.size();
11511   // If there are fewer than 2 elements, no pairing is possible.
11512   if (NumberOfSlices < 2)
11513     return;
11514 
11515   // Sort the slices so that elements that are likely to be next to each
11516   // other in memory are next to each other in the list.
11517   std::sort(LoadedSlices.begin(), LoadedSlices.end(),
11518             [](const LoadedSlice &LHS, const LoadedSlice &RHS) {
11519     assert(LHS.Origin == RHS.Origin && "Different bases not implemented.");
11520     return LHS.getOffsetFromBase() < RHS.getOffsetFromBase();
11521   });
11522   const TargetLowering &TLI = LoadedSlices[0].DAG->getTargetLoweringInfo();
11523   // First (resp. Second) is the first (resp. second) potential candidate
11524   // to be placed in a paired load.
11525   const LoadedSlice *First = nullptr;
11526   const LoadedSlice *Second = nullptr;
11527   for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice,
11528                 // Set the beginning of the pair.
11529                 First = Second) {
11530 
11531     Second = &LoadedSlices[CurrSlice];
11532 
11533     // If First is NULL, it means we start a new pair.
11534     // Get to the next slice.
11535     if (!First)
11536       continue;
11537 
11538     EVT LoadedType = First->getLoadedType();
11539 
11540     // If the types of the slices are different, we cannot pair them.
11541     if (LoadedType != Second->getLoadedType())
11542       continue;
11543 
11544     // Check if the target supplies paired loads for this type.
11545     unsigned RequiredAlignment = 0;
11546     if (!TLI.hasPairedLoad(LoadedType, RequiredAlignment)) {
11547       // Move to the next pair, this type is hopeless.
11548       Second = nullptr;
11549       continue;
11550     }
11551     // Check if we meet the alignment requirement.
11552     if (RequiredAlignment > First->getAlignment())
11553       continue;
11554 
11555     // Check that both loads are next to each other in memory.
11556     if (!areSlicesNextToEachOther(*First, *Second))
11557       continue;
11558 
11559     assert(GlobalLSCost.Loads > 0 && "We save more loads than we created!");
11560     --GlobalLSCost.Loads;
11561     // Move to the next pair.
11562     Second = nullptr;
11563   }
11564 }
11565 
11566 /// \brief Check the profitability of all involved LoadedSlice.
11567 /// Currently, it is considered profitable if there are exactly two
11568 /// involved slices (1) which are (2) next to each other in memory, and
11569 /// whose cost (\see LoadedSlice::Cost) is smaller than the original load (3).
11570 ///
11571 /// Note: The order of the elements in \p LoadedSlices may be modified, but not
11572 /// the elements themselves.
11573 ///
11574 /// FIXME: When the cost model is mature enough, we can relax
11575 /// constraints (1) and (2).
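/// As a sketch of check (3), assuming a target without paired loads: the
/// original configuration is charged one load plus the truncates, shifts and
/// cross-register-bank copies the slices would make redundant, while the
/// sliced configuration is charged one load (and possibly a zero extend) per
/// slice; slicing is only considered profitable if the latter cost is
/// strictly smaller.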
11576 static bool isSlicingProfitable(SmallVectorImpl<LoadedSlice> &LoadedSlices, 11577 const APInt &UsedBits, bool ForCodeSize) { 11578 unsigned NumberOfSlices = LoadedSlices.size(); 11579 if (StressLoadSlicing) 11580 return NumberOfSlices > 1; 11581 11582 // Check (1). 11583 if (NumberOfSlices != 2) 11584 return false; 11585 11586 // Check (2). 11587 if (!areUsedBitsDense(UsedBits)) 11588 return false; 11589 11590 // Check (3). 11591 LoadedSlice::Cost OrigCost(ForCodeSize), GlobalSlicingCost(ForCodeSize); 11592 // The original code has one big load. 11593 OrigCost.Loads = 1; 11594 for (unsigned CurrSlice = 0; CurrSlice < NumberOfSlices; ++CurrSlice) { 11595 const LoadedSlice &LS = LoadedSlices[CurrSlice]; 11596 // Accumulate the cost of all the slices. 11597 LoadedSlice::Cost SliceCost(LS, ForCodeSize); 11598 GlobalSlicingCost += SliceCost; 11599 11600 // Account as cost in the original configuration the gain obtained 11601 // with the current slices. 11602 OrigCost.addSliceGain(LS); 11603 } 11604 11605 // If the target supports paired load, adjust the cost accordingly. 11606 adjustCostForPairing(LoadedSlices, GlobalSlicingCost); 11607 return OrigCost > GlobalSlicingCost; 11608 } 11609 11610 /// \brief If the given load, \p LI, is used only by trunc or trunc(lshr) 11611 /// operations, split it in the various pieces being extracted. 11612 /// 11613 /// This sort of thing is introduced by SROA. 11614 /// This slicing takes care not to insert overlapping loads. 11615 /// \pre LI is a simple load (i.e., not an atomic or volatile load). 11616 bool DAGCombiner::SliceUpLoad(SDNode *N) { 11617 if (Level < AfterLegalizeDAG) 11618 return false; 11619 11620 LoadSDNode *LD = cast<LoadSDNode>(N); 11621 if (LD->isVolatile() || !ISD::isNormalLoad(LD) || 11622 !LD->getValueType(0).isInteger()) 11623 return false; 11624 11625 // Keep track of already used bits to detect overlapping values. 11626 // In that case, we will just abort the transformation. 11627 APInt UsedBits(LD->getValueSizeInBits(0), 0); 11628 11629 SmallVector<LoadedSlice, 4> LoadedSlices; 11630 11631 // Check if this load is used as several smaller chunks of bits. 11632 // Basically, look for uses in trunc or trunc(lshr) and record a new chain 11633 // of computation for each trunc. 11634 for (SDNode::use_iterator UI = LD->use_begin(), UIEnd = LD->use_end(); 11635 UI != UIEnd; ++UI) { 11636 // Skip the uses of the chain. 11637 if (UI.getUse().getResNo() != 0) 11638 continue; 11639 11640 SDNode *User = *UI; 11641 unsigned Shift = 0; 11642 11643 // Check if this is a trunc(lshr). 11644 if (User->getOpcode() == ISD::SRL && User->hasOneUse() && 11645 isa<ConstantSDNode>(User->getOperand(1))) { 11646 Shift = cast<ConstantSDNode>(User->getOperand(1))->getZExtValue(); 11647 User = *User->use_begin(); 11648 } 11649 11650 // At this point, User is a Truncate, iff we encountered, trunc or 11651 // trunc(lshr). 11652 if (User->getOpcode() != ISD::TRUNCATE) 11653 return false; 11654 11655 // The width of the type must be a power of 2 and greater than 8-bits. 11656 // Otherwise the load cannot be represented in LLVM IR. 11657 // Moreover, if we shifted with a non-8-bits multiple, the slice 11658 // will be across several bytes. We do not support that. 11659 unsigned Width = User->getValueSizeInBits(0); 11660 if (Width < 8 || !isPowerOf2_32(Width) || (Shift & 0x7)) 11661 return 0; 11662 11663 // Build the slice for this chain of computations. 
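    // E.g. (illustrative), a user "(i32 (trunc (srl (i64 load p), 32)))"
    // yields a slice with Shift == 32 that covers the upper four bytes of the
    // original loaded value.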
11664 LoadedSlice LS(User, LD, Shift, &DAG); 11665 APInt CurrentUsedBits = LS.getUsedBits(); 11666 11667 // Check if this slice overlaps with another. 11668 if ((CurrentUsedBits & UsedBits) != 0) 11669 return false; 11670 // Update the bits used globally. 11671 UsedBits |= CurrentUsedBits; 11672 11673 // Check if the new slice would be legal. 11674 if (!LS.isLegal()) 11675 return false; 11676 11677 // Record the slice. 11678 LoadedSlices.push_back(LS); 11679 } 11680 11681 // Abort slicing if it does not seem to be profitable. 11682 if (!isSlicingProfitable(LoadedSlices, UsedBits, ForCodeSize)) 11683 return false; 11684 11685 ++SlicedLoads; 11686 11687 // Rewrite each chain to use an independent load. 11688 // By construction, each chain can be represented by a unique load. 11689 11690 // Prepare the argument for the new token factor for all the slices. 11691 SmallVector<SDValue, 8> ArgChains; 11692 for (SmallVectorImpl<LoadedSlice>::const_iterator 11693 LSIt = LoadedSlices.begin(), 11694 LSItEnd = LoadedSlices.end(); 11695 LSIt != LSItEnd; ++LSIt) { 11696 SDValue SliceInst = LSIt->loadSlice(); 11697 CombineTo(LSIt->Inst, SliceInst, true); 11698 if (SliceInst.getOpcode() != ISD::LOAD) 11699 SliceInst = SliceInst.getOperand(0); 11700 assert(SliceInst->getOpcode() == ISD::LOAD && 11701 "It takes more than a zext to get to the loaded slice!!"); 11702 ArgChains.push_back(SliceInst.getValue(1)); 11703 } 11704 11705 SDValue Chain = DAG.getNode(ISD::TokenFactor, SDLoc(LD), MVT::Other, 11706 ArgChains); 11707 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), Chain); 11708 AddToWorklist(Chain.getNode()); 11709 return true; 11710 } 11711 11712 /// Check to see if V is (and load (ptr), imm), where the load is having 11713 /// specific bytes cleared out. If so, return the byte size being masked out 11714 /// and the shift amount. 11715 static std::pair<unsigned, unsigned> 11716 CheckForMaskedLoad(SDValue V, SDValue Ptr, SDValue Chain) { 11717 std::pair<unsigned, unsigned> Result(0, 0); 11718 11719 // Check for the structure we're looking for. 11720 if (V->getOpcode() != ISD::AND || 11721 !isa<ConstantSDNode>(V->getOperand(1)) || 11722 !ISD::isNormalLoad(V->getOperand(0).getNode())) 11723 return Result; 11724 11725 // Check the chain and pointer. 11726 LoadSDNode *LD = cast<LoadSDNode>(V->getOperand(0)); 11727 if (LD->getBasePtr() != Ptr) return Result; // Not from same pointer. 11728 11729 // The store should be chained directly to the load or be an operand of a 11730 // tokenfactor. 11731 if (LD == Chain.getNode()) 11732 ; // ok. 11733 else if (Chain->getOpcode() != ISD::TokenFactor) 11734 return Result; // Fail. 11735 else { 11736 bool isOk = false; 11737 for (const SDValue &ChainOp : Chain->op_values()) 11738 if (ChainOp.getNode() == LD) { 11739 isOk = true; 11740 break; 11741 } 11742 if (!isOk) return Result; 11743 } 11744 11745 // This only handles simple types. 11746 if (V.getValueType() != MVT::i16 && 11747 V.getValueType() != MVT::i32 && 11748 V.getValueType() != MVT::i64) 11749 return Result; 11750 11751 // Check the constant mask. Invert it so that the bits being masked out are 11752 // 0 and the bits being kept are 1. Use getSExtValue so that leading bits 11753 // follow the sign bit for uniformity. 11754 uint64_t NotMask = ~cast<ConstantSDNode>(V->getOperand(1))->getSExtValue(); 11755 unsigned NotMaskLZ = countLeadingZeros(NotMask); 11756 if (NotMaskLZ & 7) return Result; // Must be multiple of a byte. 
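  // Worked example of the remaining checks (illustrative values only): for an
  // i32 "and (load p), 0xFFFF00FF", NotMask is 0x0000FF00 after inverting the
  // sign-extended mask, a single byte-aligned run of cleared bits, so the
  // function returns {1 byte masked, byte offset 1}.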
11757 unsigned NotMaskTZ = countTrailingZeros(NotMask); 11758 if (NotMaskTZ & 7) return Result; // Must be multiple of a byte. 11759 if (NotMaskLZ == 64) return Result; // All zero mask. 11760 11761 // See if we have a continuous run of bits. If so, we have 0*1+0* 11762 if (countTrailingOnes(NotMask >> NotMaskTZ) + NotMaskTZ + NotMaskLZ != 64) 11763 return Result; 11764 11765 // Adjust NotMaskLZ down to be from the actual size of the int instead of i64. 11766 if (V.getValueType() != MVT::i64 && NotMaskLZ) 11767 NotMaskLZ -= 64-V.getValueSizeInBits(); 11768 11769 unsigned MaskedBytes = (V.getValueSizeInBits()-NotMaskLZ-NotMaskTZ)/8; 11770 switch (MaskedBytes) { 11771 case 1: 11772 case 2: 11773 case 4: break; 11774 default: return Result; // All one mask, or 5-byte mask. 11775 } 11776 11777 // Verify that the first bit starts at a multiple of mask so that the access 11778 // is aligned the same as the access width. 11779 if (NotMaskTZ && NotMaskTZ/8 % MaskedBytes) return Result; 11780 11781 Result.first = MaskedBytes; 11782 Result.second = NotMaskTZ/8; 11783 return Result; 11784 } 11785 11786 11787 /// Check to see if IVal is something that provides a value as specified by 11788 /// MaskInfo. If so, replace the specified store with a narrower store of 11789 /// truncated IVal. 11790 static SDNode * 11791 ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo, 11792 SDValue IVal, StoreSDNode *St, 11793 DAGCombiner *DC) { 11794 unsigned NumBytes = MaskInfo.first; 11795 unsigned ByteShift = MaskInfo.second; 11796 SelectionDAG &DAG = DC->getDAG(); 11797 11798 // Check to see if IVal is all zeros in the part being masked in by the 'or' 11799 // that uses this. If not, this is not a replacement. 11800 APInt Mask = ~APInt::getBitsSet(IVal.getValueSizeInBits(), 11801 ByteShift*8, (ByteShift+NumBytes)*8); 11802 if (!DAG.MaskedValueIsZero(IVal, Mask)) return nullptr; 11803 11804 // Check that it is legal on the target to do this. It is legal if the new 11805 // VT we're shrinking to (i8/i16/i32) is legal or we're still before type 11806 // legalization. 11807 MVT VT = MVT::getIntegerVT(NumBytes*8); 11808 if (!DC->isTypeLegal(VT)) 11809 return nullptr; 11810 11811 // Okay, we can do this! Replace the 'St' store with a store of IVal that is 11812 // shifted by ByteShift and truncated down to NumBytes. 11813 if (ByteShift) { 11814 SDLoc DL(IVal); 11815 IVal = DAG.getNode(ISD::SRL, DL, IVal.getValueType(), IVal, 11816 DAG.getConstant(ByteShift*8, DL, 11817 DC->getShiftAmountTy(IVal.getValueType()))); 11818 } 11819 11820 // Figure out the offset for the store and the alignment of the access. 11821 unsigned StOffset; 11822 unsigned NewAlign = St->getAlignment(); 11823 11824 if (DAG.getDataLayout().isLittleEndian()) 11825 StOffset = ByteShift; 11826 else 11827 StOffset = IVal.getValueType().getStoreSize() - ByteShift - NumBytes; 11828 11829 SDValue Ptr = St->getBasePtr(); 11830 if (StOffset) { 11831 SDLoc DL(IVal); 11832 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), 11833 Ptr, DAG.getConstant(StOffset, DL, Ptr.getValueType())); 11834 NewAlign = MinAlign(NewAlign, StOffset); 11835 } 11836 11837 // Truncate down to the new size. 
11838 IVal = DAG.getNode(ISD::TRUNCATE, SDLoc(IVal), VT, IVal); 11839 11840 ++OpsNarrowed; 11841 return DAG 11842 .getStore(St->getChain(), SDLoc(St), IVal, Ptr, 11843 St->getPointerInfo().getWithOffset(StOffset), NewAlign) 11844 .getNode(); 11845 } 11846 11847 11848 /// Look for sequence of load / op / store where op is one of 'or', 'xor', and 11849 /// 'and' of immediates. If 'op' is only touching some of the loaded bits, try 11850 /// narrowing the load and store if it would end up being a win for performance 11851 /// or code size. 11852 SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) { 11853 StoreSDNode *ST = cast<StoreSDNode>(N); 11854 if (ST->isVolatile()) 11855 return SDValue(); 11856 11857 SDValue Chain = ST->getChain(); 11858 SDValue Value = ST->getValue(); 11859 SDValue Ptr = ST->getBasePtr(); 11860 EVT VT = Value.getValueType(); 11861 11862 if (ST->isTruncatingStore() || VT.isVector() || !Value.hasOneUse()) 11863 return SDValue(); 11864 11865 unsigned Opc = Value.getOpcode(); 11866 11867 // If this is "store (or X, Y), P" and X is "(and (load P), cst)", where cst 11868 // is a byte mask indicating a consecutive number of bytes, check to see if 11869 // Y is known to provide just those bytes. If so, we try to replace the 11870 // load + replace + store sequence with a single (narrower) store, which makes 11871 // the load dead. 11872 if (Opc == ISD::OR) { 11873 std::pair<unsigned, unsigned> MaskedLoad; 11874 MaskedLoad = CheckForMaskedLoad(Value.getOperand(0), Ptr, Chain); 11875 if (MaskedLoad.first) 11876 if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad, 11877 Value.getOperand(1), ST,this)) 11878 return SDValue(NewST, 0); 11879 11880 // Or is commutative, so try swapping X and Y. 11881 MaskedLoad = CheckForMaskedLoad(Value.getOperand(1), Ptr, Chain); 11882 if (MaskedLoad.first) 11883 if (SDNode *NewST = ShrinkLoadReplaceStoreWithStore(MaskedLoad, 11884 Value.getOperand(0), ST,this)) 11885 return SDValue(NewST, 0); 11886 } 11887 11888 if ((Opc != ISD::OR && Opc != ISD::XOR && Opc != ISD::AND) || 11889 Value.getOperand(1).getOpcode() != ISD::Constant) 11890 return SDValue(); 11891 11892 SDValue N0 = Value.getOperand(0); 11893 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() && 11894 Chain == SDValue(N0.getNode(), 1)) { 11895 LoadSDNode *LD = cast<LoadSDNode>(N0); 11896 if (LD->getBasePtr() != Ptr || 11897 LD->getPointerInfo().getAddrSpace() != 11898 ST->getPointerInfo().getAddrSpace()) 11899 return SDValue(); 11900 11901 // Find the type to narrow it the load / op / store to. 11902 SDValue N1 = Value.getOperand(1); 11903 unsigned BitWidth = N1.getValueSizeInBits(); 11904 APInt Imm = cast<ConstantSDNode>(N1)->getAPIntValue(); 11905 if (Opc == ISD::AND) 11906 Imm ^= APInt::getAllOnesValue(BitWidth); 11907 if (Imm == 0 || Imm.isAllOnesValue()) 11908 return SDValue(); 11909 unsigned ShAmt = Imm.countTrailingZeros(); 11910 unsigned MSB = BitWidth - Imm.countLeadingZeros() - 1; 11911 unsigned NewBW = NextPowerOf2(MSB - ShAmt); 11912 EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW); 11913 // The narrowing should be profitable, the load/store operation should be 11914 // legal (or custom) and the store size should be equal to the NewVT width. 
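    // Illustrative example (not from the source): for
    // "store (or (i32 load p), 0x00FF0000), p" only byte 2 of the value
    // changes, so the sequence can be narrowed to an i8 load/or/store at p+2
    // (p+1 on big-endian targets), provided i8 OR is legal (or custom) and
    // the target considers the narrowing profitable.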
11915 while (NewBW < BitWidth && 11916 (NewVT.getStoreSizeInBits() != NewBW || 11917 !TLI.isOperationLegalOrCustom(Opc, NewVT) || 11918 !TLI.isNarrowingProfitable(VT, NewVT))) { 11919 NewBW = NextPowerOf2(NewBW); 11920 NewVT = EVT::getIntegerVT(*DAG.getContext(), NewBW); 11921 } 11922 if (NewBW >= BitWidth) 11923 return SDValue(); 11924 11925 // If the lsb changed does not start at the type bitwidth boundary, 11926 // start at the previous one. 11927 if (ShAmt % NewBW) 11928 ShAmt = (((ShAmt + NewBW - 1) / NewBW) * NewBW) - NewBW; 11929 APInt Mask = APInt::getBitsSet(BitWidth, ShAmt, 11930 std::min(BitWidth, ShAmt + NewBW)); 11931 if ((Imm & Mask) == Imm) { 11932 APInt NewImm = (Imm & Mask).lshr(ShAmt).trunc(NewBW); 11933 if (Opc == ISD::AND) 11934 NewImm ^= APInt::getAllOnesValue(NewBW); 11935 uint64_t PtrOff = ShAmt / 8; 11936 // For big endian targets, we need to adjust the offset to the pointer to 11937 // load the correct bytes. 11938 if (DAG.getDataLayout().isBigEndian()) 11939 PtrOff = (BitWidth + 7 - NewBW) / 8 - PtrOff; 11940 11941 unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff); 11942 Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext()); 11943 if (NewAlign < DAG.getDataLayout().getABITypeAlignment(NewVTTy)) 11944 return SDValue(); 11945 11946 SDValue NewPtr = DAG.getNode(ISD::ADD, SDLoc(LD), 11947 Ptr.getValueType(), Ptr, 11948 DAG.getConstant(PtrOff, SDLoc(LD), 11949 Ptr.getValueType())); 11950 SDValue NewLD = 11951 DAG.getLoad(NewVT, SDLoc(N0), LD->getChain(), NewPtr, 11952 LD->getPointerInfo().getWithOffset(PtrOff), NewAlign, 11953 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 11954 SDValue NewVal = DAG.getNode(Opc, SDLoc(Value), NewVT, NewLD, 11955 DAG.getConstant(NewImm, SDLoc(Value), 11956 NewVT)); 11957 SDValue NewST = 11958 DAG.getStore(Chain, SDLoc(N), NewVal, NewPtr, 11959 ST->getPointerInfo().getWithOffset(PtrOff), NewAlign); 11960 11961 AddToWorklist(NewPtr.getNode()); 11962 AddToWorklist(NewLD.getNode()); 11963 AddToWorklist(NewVal.getNode()); 11964 WorklistRemover DeadNodes(*this); 11965 DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), NewLD.getValue(1)); 11966 ++OpsNarrowed; 11967 return NewST; 11968 } 11969 } 11970 11971 return SDValue(); 11972 } 11973 11974 /// For a given floating point load / store pair, if the load value isn't used 11975 /// by any other operations, then consider transforming the pair to integer 11976 /// load / store operations if the target deems the transformation profitable. 
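/// A sketch of the pattern (types illustrative): if an f64 value is loaded
/// only to be stored somewhere else, "(store (f64 load p), q)" can be turned
/// into "(store (i64 load p), q)" so the value never has to pass through the
/// floating-point register file, when the target says the integer form is
/// preferable and the accesses are sufficiently aligned.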
11977 SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) { 11978 StoreSDNode *ST = cast<StoreSDNode>(N); 11979 SDValue Chain = ST->getChain(); 11980 SDValue Value = ST->getValue(); 11981 if (ISD::isNormalStore(ST) && ISD::isNormalLoad(Value.getNode()) && 11982 Value.hasOneUse() && 11983 Chain == SDValue(Value.getNode(), 1)) { 11984 LoadSDNode *LD = cast<LoadSDNode>(Value); 11985 EVT VT = LD->getMemoryVT(); 11986 if (!VT.isFloatingPoint() || 11987 VT != ST->getMemoryVT() || 11988 LD->isNonTemporal() || 11989 ST->isNonTemporal() || 11990 LD->getPointerInfo().getAddrSpace() != 0 || 11991 ST->getPointerInfo().getAddrSpace() != 0) 11992 return SDValue(); 11993 11994 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 11995 if (!TLI.isOperationLegal(ISD::LOAD, IntVT) || 11996 !TLI.isOperationLegal(ISD::STORE, IntVT) || 11997 !TLI.isDesirableToTransformToIntegerOp(ISD::LOAD, VT) || 11998 !TLI.isDesirableToTransformToIntegerOp(ISD::STORE, VT)) 11999 return SDValue(); 12000 12001 unsigned LDAlign = LD->getAlignment(); 12002 unsigned STAlign = ST->getAlignment(); 12003 Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext()); 12004 unsigned ABIAlign = DAG.getDataLayout().getABITypeAlignment(IntVTTy); 12005 if (LDAlign < ABIAlign || STAlign < ABIAlign) 12006 return SDValue(); 12007 12008 SDValue NewLD = 12009 DAG.getLoad(IntVT, SDLoc(Value), LD->getChain(), LD->getBasePtr(), 12010 LD->getPointerInfo(), LDAlign); 12011 12012 SDValue NewST = 12013 DAG.getStore(NewLD.getValue(1), SDLoc(N), NewLD, ST->getBasePtr(), 12014 ST->getPointerInfo(), STAlign); 12015 12016 AddToWorklist(NewLD.getNode()); 12017 AddToWorklist(NewST.getNode()); 12018 WorklistRemover DeadNodes(*this); 12019 DAG.ReplaceAllUsesOfValueWith(Value.getValue(1), NewLD.getValue(1)); 12020 ++LdStFP2Int; 12021 return NewST; 12022 } 12023 12024 return SDValue(); 12025 } 12026 12027 // This is a helper function for visitMUL to check the profitability 12028 // of folding (mul (add x, c1), c2) -> (add (mul x, c2), c1*c2). 12029 // MulNode is the original multiply, AddNode is (add x, c1), 12030 // and ConstNode is c2. 12031 // 12032 // If the (add x, c1) has multiple uses, we could increase 12033 // the number of adds if we make this transformation. 12034 // It would only be worth doing this if we can remove a 12035 // multiply in the process. Check for that here. 12036 // To illustrate: 12037 // (A + c1) * c3 12038 // (A + c2) * c3 12039 // We're checking for cases where we have common "c3 * A" expressions. 12040 bool DAGCombiner::isMulAddWithConstProfitable(SDNode *MulNode, 12041 SDValue &AddNode, 12042 SDValue &ConstNode) { 12043 APInt Val; 12044 12045 // If the add only has one use, this would be OK to do. 12046 if (AddNode.getNode()->hasOneUse()) 12047 return true; 12048 12049 // Walk all the users of the constant with which we're multiplying. 12050 for (SDNode *Use : ConstNode->uses()) { 12051 12052 if (Use == MulNode) // This use is the one we're on right now. Skip it. 12053 continue; 12054 12055 if (Use->getOpcode() == ISD::MUL) { // We have another multiply use. 12056 SDNode *OtherOp; 12057 SDNode *MulVar = AddNode.getOperand(0).getNode(); 12058 12059 // OtherOp is what we're multiplying against the constant. 12060 if (Use->getOperand(0) == ConstNode) 12061 OtherOp = Use->getOperand(1).getNode(); 12062 else 12063 OtherOp = Use->getOperand(0).getNode(); 12064 12065 // Check to see if multiply is with the same operand of our "add". 12066 // 12067 // ConstNode = CONST 12068 // Use = ConstNode * A <-- visiting Use. 
OtherOp is A. 12069 // ... 12070 // AddNode = (A + c1) <-- MulVar is A. 12071 // = AddNode * ConstNode <-- current visiting instruction. 12072 // 12073 // If we make this transformation, we will have a common 12074 // multiply (ConstNode * A) that we can save. 12075 if (OtherOp == MulVar) 12076 return true; 12077 12078 // Now check to see if a future expansion will give us a common 12079 // multiply. 12080 // 12081 // ConstNode = CONST 12082 // AddNode = (A + c1) 12083 // ... = AddNode * ConstNode <-- current visiting instruction. 12084 // ... 12085 // OtherOp = (A + c2) 12086 // Use = OtherOp * ConstNode <-- visiting Use. 12087 // 12088 // If we make this transformation, we will have a common 12089 // multiply (CONST * A) after we also do the same transformation 12090 // to the "t2" instruction. 12091 if (OtherOp->getOpcode() == ISD::ADD && 12092 DAG.isConstantIntBuildVectorOrConstantInt(OtherOp->getOperand(1)) && 12093 OtherOp->getOperand(0).getNode() == MulVar) 12094 return true; 12095 } 12096 } 12097 12098 // Didn't find a case where this would be profitable. 12099 return false; 12100 } 12101 12102 SDValue DAGCombiner::getMergeStoreChains(SmallVectorImpl<MemOpLink> &StoreNodes, 12103 unsigned NumStores) { 12104 SmallVector<SDValue, 8> Chains; 12105 SmallPtrSet<const SDNode *, 8> Visited; 12106 SDLoc StoreDL(StoreNodes[0].MemNode); 12107 12108 for (unsigned i = 0; i < NumStores; ++i) { 12109 Visited.insert(StoreNodes[i].MemNode); 12110 } 12111 12112 // don't include nodes that are children 12113 for (unsigned i = 0; i < NumStores; ++i) { 12114 if (Visited.count(StoreNodes[i].MemNode->getChain().getNode()) == 0) 12115 Chains.push_back(StoreNodes[i].MemNode->getChain()); 12116 } 12117 12118 assert(Chains.size() > 0 && "Chain should have generated a chain"); 12119 return DAG.getNode(ISD::TokenFactor, StoreDL, MVT::Other, Chains); 12120 } 12121 12122 bool DAGCombiner::MergeStoresOfConstantsOrVecElts( 12123 SmallVectorImpl<MemOpLink> &StoreNodes, EVT MemVT, 12124 unsigned NumStores, bool IsConstantSrc, bool UseVector) { 12125 // Make sure we have something to merge. 12126 if (NumStores < 2) 12127 return false; 12128 12129 int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8; 12130 12131 // The latest Node in the DAG. 12132 SDLoc DL(StoreNodes[0].MemNode); 12133 12134 SDValue StoredVal; 12135 if (UseVector) { 12136 bool IsVec = MemVT.isVector(); 12137 unsigned Elts = NumStores; 12138 if (IsVec) { 12139 // When merging vector stores, get the total number of elements. 12140 Elts *= MemVT.getVectorNumElements(); 12141 } 12142 // Get the type for the merged vector store. 
12143 EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts); 12144 assert(TLI.isTypeLegal(Ty) && "Illegal vector store"); 12145 12146 if (IsConstantSrc) { 12147 SmallVector<SDValue, 8> BuildVector; 12148 for (unsigned I = 0, E = Ty.getVectorNumElements(); I != E; ++I) { 12149 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[I].MemNode); 12150 SDValue Val = St->getValue(); 12151 if (MemVT.getScalarType().isInteger()) 12152 if (auto *CFP = dyn_cast<ConstantFPSDNode>(St->getValue())) 12153 Val = DAG.getConstant( 12154 (uint32_t)CFP->getValueAPF().bitcastToAPInt().getZExtValue(), 12155 SDLoc(CFP), MemVT); 12156 BuildVector.push_back(Val); 12157 } 12158 StoredVal = DAG.getBuildVector(Ty, DL, BuildVector); 12159 } else { 12160 SmallVector<SDValue, 8> Ops; 12161 for (unsigned i = 0; i < NumStores; ++i) { 12162 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); 12163 SDValue Val = St->getValue(); 12164 // All operands of BUILD_VECTOR / CONCAT_VECTOR must have the same type. 12165 if (Val.getValueType() != MemVT) 12166 return false; 12167 Ops.push_back(Val); 12168 } 12169 12170 // Build the extracted vector elements back into a vector. 12171 StoredVal = DAG.getNode(IsVec ? ISD::CONCAT_VECTORS : ISD::BUILD_VECTOR, 12172 DL, Ty, Ops); } 12173 } else { 12174 // We should always use a vector store when merging extracted vector 12175 // elements, so this path implies a store of constants. 12176 assert(IsConstantSrc && "Merged vector elements should use vector store"); 12177 12178 unsigned SizeInBits = NumStores * ElementSizeBytes * 8; 12179 APInt StoreInt(SizeInBits, 0); 12180 12181 // Construct a single integer constant which is made of the smaller 12182 // constant inputs. 12183 bool IsLE = DAG.getDataLayout().isLittleEndian(); 12184 for (unsigned i = 0; i < NumStores; ++i) { 12185 unsigned Idx = IsLE ? (NumStores - 1 - i) : i; 12186 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[Idx].MemNode); 12187 12188 SDValue Val = St->getValue(); 12189 StoreInt <<= ElementSizeBytes * 8; 12190 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) { 12191 StoreInt |= C->getAPIntValue().zext(SizeInBits); 12192 } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Val)) { 12193 StoreInt |= C->getValueAPF().bitcastToAPInt().zext(SizeInBits); 12194 } else { 12195 llvm_unreachable("Invalid constant element type"); 12196 } 12197 } 12198 12199 // Create the new Load and Store operations. 12200 EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits); 12201 StoredVal = DAG.getConstant(StoreInt, DL, StoreTy); 12202 } 12203 12204 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode; 12205 SDValue NewChain = getMergeStoreChains(StoreNodes, NumStores); 12206 SDValue NewStore = DAG.getStore(NewChain, DL, StoredVal, 12207 FirstInChain->getBasePtr(), 12208 FirstInChain->getPointerInfo(), 12209 FirstInChain->getAlignment()); 12210 12211 // Replace all merged stores with the new store. 12212 for (unsigned i = 0; i < NumStores; ++i) 12213 CombineTo(StoreNodes[i].MemNode, NewStore); 12214 12215 AddToWorklist(NewChain.getNode()); 12216 return true; 12217 } 12218 12219 void DAGCombiner::getStoreMergeCandidates( 12220 StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes) { 12221 // This holds the base pointer, index, and the offset in bytes from the base 12222 // pointer. 12223 BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG); 12224 EVT MemVT = St->getMemoryVT(); 12225 12226 // We must have a base and an offset. 
12227   if (!BasePtr.Base.getNode())
12228     return;
12229
12230   // Do not handle stores to undef base pointers.
12231   if (BasePtr.Base.isUndef())
12232     return;
12233
12234   bool IsLoadSrc = isa<LoadSDNode>(St->getValue());
12235   bool IsConstantSrc = isa<ConstantSDNode>(St->getValue()) ||
12236                        isa<ConstantFPSDNode>(St->getValue());
12237   bool IsExtractVecSrc =
12238       (St->getValue().getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
12239        St->getValue().getOpcode() == ISD::EXTRACT_SUBVECTOR);
12240   auto CandidateMatch = [&](StoreSDNode *Other, BaseIndexOffset &Ptr) -> bool {
12241     if (Other->isVolatile() || Other->isIndexed())
12242       return false;
12243     // We can merge constant floats to equivalent integers.
12244     if (Other->getMemoryVT() != MemVT)
12245       if (!(MemVT.isInteger() && MemVT.bitsEq(Other->getMemoryVT()) &&
12246             isa<ConstantFPSDNode>(Other->getValue())))
12247         return false;
12248     if (IsLoadSrc)
12249       if (!isa<LoadSDNode>(Other->getValue()))
12250         return false;
12251     if (IsConstantSrc)
12252       if (!(isa<ConstantSDNode>(Other->getValue()) ||
12253             isa<ConstantFPSDNode>(Other->getValue())))
12254         return false;
12255     if (IsExtractVecSrc)
12256       if (!(Other->getValue().getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
12257             Other->getValue().getOpcode() == ISD::EXTRACT_SUBVECTOR))
12258         return false;
12259     Ptr = BaseIndexOffset::match(Other->getBasePtr(), DAG);
12260     return (Ptr.equalBaseIndex(BasePtr));
12261   };
12262   // We are looking for a root node which is an ancestor to all mergeable
12263   // stores. We search up through a load, to our root and then down
12264   // through all children. For instance we will find Store{1,2,3} if
12265   // St is Store1, Store2, or Store3 where the root is not a load,
12266   // which is always true for non-volatile ops. TODO: Expand
12267   // the search to find all valid candidates through multiple layers of loads.
12268   //
12269   // Root
12270   // |-------|-------|
12271   // Load    Load    Store3
12272   // |       |
12273   // Store1  Store2
12274   //
12275   // FIXME: We should be able to climb and
12276   // descend TokenFactors to find candidates as well.
12277
12278   SDNode *RootNode = (St->getChain()).getNode();
12279
12280   if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(RootNode)) {
12281     RootNode = Ldn->getChain().getNode();
12282     for (auto I = RootNode->use_begin(), E = RootNode->use_end(); I != E; ++I)
12283       if (I.getOperandNo() == 0 && isa<LoadSDNode>(*I)) // walk down chain
12284         for (auto I2 = (*I)->use_begin(), E2 = (*I)->use_end(); I2 != E2; ++I2)
12285           if (I2.getOperandNo() == 0)
12286             if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I2)) {
12287               BaseIndexOffset Ptr;
12288               if (CandidateMatch(OtherST, Ptr))
12289                 StoreNodes.push_back(MemOpLink(OtherST, Ptr.Offset));
12290             }
12291   } else
12292     for (auto I = RootNode->use_begin(), E = RootNode->use_end(); I != E; ++I)
12293       if (I.getOperandNo() == 0)
12294         if (StoreSDNode *OtherST = dyn_cast<StoreSDNode>(*I)) {
12295           BaseIndexOffset Ptr;
12296           if (CandidateMatch(OtherST, Ptr))
12297             StoreNodes.push_back(MemOpLink(OtherST, Ptr.Offset));
12298         }
12299 }
12300
12301 // We need to check that merging these stores does not cause a loop
12302 // in the DAG. Any store candidate may depend on another candidate
12303 // indirectly through its operand (we already consider dependencies
12304 // through the chain). Check in parallel by searching up from
12305 // non-chain operands of candidates.
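// For instance, if the value stored by one candidate is computed (through its
// non-chain operands) from another candidate store in the set, the single
// merged store would end up being a predecessor of itself.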
12306 bool DAGCombiner::checkMergeStoreCandidatesForDependencies( 12307 SmallVectorImpl<MemOpLink> &StoreNodes, unsigned NumStores) { 12308 SmallPtrSet<const SDNode *, 16> Visited; 12309 SmallVector<const SDNode *, 8> Worklist; 12310 // search ops of store candidates 12311 for (unsigned i = 0; i < NumStores; ++i) { 12312 SDNode *n = StoreNodes[i].MemNode; 12313 // Potential loops may happen only through non-chain operands 12314 for (unsigned j = 1; j < n->getNumOperands(); ++j) 12315 Worklist.push_back(n->getOperand(j).getNode()); 12316 } 12317 // search through DAG. We can stop early if we find a storenode 12318 for (unsigned i = 0; i < NumStores; ++i) { 12319 if (SDNode::hasPredecessorHelper(StoreNodes[i].MemNode, Visited, Worklist)) 12320 return false; 12321 } 12322 return true; 12323 } 12324 12325 bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) { 12326 if (OptLevel == CodeGenOpt::None) 12327 return false; 12328 12329 EVT MemVT = St->getMemoryVT(); 12330 int64_t ElementSizeBytes = MemVT.getSizeInBits() / 8; 12331 12332 if (MemVT.getSizeInBits() * 2 > MaximumLegalStoreInBits) 12333 return false; 12334 12335 bool NoVectors = DAG.getMachineFunction().getFunction()->hasFnAttribute( 12336 Attribute::NoImplicitFloat); 12337 12338 // This function cannot currently deal with non-byte-sized memory sizes. 12339 if (ElementSizeBytes * 8 != MemVT.getSizeInBits()) 12340 return false; 12341 12342 if (!MemVT.isSimple()) 12343 return false; 12344 12345 // Perform an early exit check. Do not bother looking at stored values that 12346 // are not constants, loads, or extracted vector elements. 12347 SDValue StoredVal = St->getValue(); 12348 bool IsLoadSrc = isa<LoadSDNode>(StoredVal); 12349 bool IsConstantSrc = isa<ConstantSDNode>(StoredVal) || 12350 isa<ConstantFPSDNode>(StoredVal); 12351 bool IsExtractVecSrc = (StoredVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT || 12352 StoredVal.getOpcode() == ISD::EXTRACT_SUBVECTOR); 12353 12354 if (!IsConstantSrc && !IsLoadSrc && !IsExtractVecSrc) 12355 return false; 12356 12357 // Don't merge vectors into wider vectors if the source data comes from loads. 12358 // TODO: This restriction can be lifted by using logic similar to the 12359 // ExtractVecSrc case. 12360 if (MemVT.isVector() && IsLoadSrc) 12361 return false; 12362 12363 SmallVector<MemOpLink, 8> StoreNodes; 12364 // Find potential store merge candidates by searching through chain sub-DAG 12365 getStoreMergeCandidates(St, StoreNodes); 12366 12367 // Check if there is anything to merge. 12368 if (StoreNodes.size() < 2) 12369 return false; 12370 12371 // Sort the memory operands according to their distance from the 12372 // base pointer. 12373 std::sort(StoreNodes.begin(), StoreNodes.end(), 12374 [](MemOpLink LHS, MemOpLink RHS) { 12375 return LHS.OffsetFromBase < RHS.OffsetFromBase; 12376 }); 12377 12378 // Scan the memory operations on the chain and find the first non-consecutive 12379 // store memory address. 12380 unsigned NumConsecutiveStores = 0; 12381 int64_t StartAddress = StoreNodes[0].OffsetFromBase; 12382 12383 // Check that the addresses are consecutive starting from the second 12384 // element in the list of stores. 
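  // That is, StoreNodes[i] must sit exactly i * ElementSizeBytes past the first
  // store's offset; the scan stops at the first gap or overlap.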
12385 for (unsigned i = 1, e = StoreNodes.size(); i < e; ++i) { 12386 int64_t CurrAddress = StoreNodes[i].OffsetFromBase; 12387 if (CurrAddress - StartAddress != (ElementSizeBytes * i)) 12388 break; 12389 NumConsecutiveStores = i + 1; 12390 } 12391 12392 if (NumConsecutiveStores < 2) 12393 return false; 12394 12395 // Check that we can merge these candidates without causing a cycle 12396 if (!checkMergeStoreCandidatesForDependencies(StoreNodes, NumConsecutiveStores)) 12397 return false; 12398 12399 12400 // The node with the lowest store address. 12401 LLVMContext &Context = *DAG.getContext(); 12402 const DataLayout &DL = DAG.getDataLayout(); 12403 12404 // Store the constants into memory as one consecutive store. 12405 if (IsConstantSrc) { 12406 bool RV = false; 12407 while (NumConsecutiveStores > 1) { 12408 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode; 12409 unsigned FirstStoreAS = FirstInChain->getAddressSpace(); 12410 unsigned FirstStoreAlign = FirstInChain->getAlignment(); 12411 unsigned LastLegalType = 0; 12412 unsigned LastLegalVectorType = 0; 12413 bool NonZero = false; 12414 for (unsigned i = 0; i < NumConsecutiveStores; ++i) { 12415 StoreSDNode *ST = cast<StoreSDNode>(StoreNodes[i].MemNode); 12416 SDValue StoredVal = ST->getValue(); 12417 12418 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(StoredVal)) { 12419 NonZero |= !C->isNullValue(); 12420 } else if (ConstantFPSDNode *C = 12421 dyn_cast<ConstantFPSDNode>(StoredVal)) { 12422 NonZero |= !C->getConstantFPValue()->isNullValue(); 12423 } else { 12424 // Non-constant. 12425 break; 12426 } 12427 12428 // Find a legal type for the constant store. 12429 unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8; 12430 EVT StoreTy = EVT::getIntegerVT(Context, SizeInBits); 12431 bool IsFast = false; 12432 if (TLI.isTypeLegal(StoreTy) && 12433 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS, 12434 FirstStoreAlign, &IsFast) && 12435 IsFast) { 12436 LastLegalType = i + 1; 12437 // Or check whether a truncstore is legal. 12438 } else if (TLI.getTypeAction(Context, StoreTy) == 12439 TargetLowering::TypePromoteInteger) { 12440 EVT LegalizedStoredValueTy = 12441 TLI.getTypeToTransformTo(Context, StoredVal.getValueType()); 12442 if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) && 12443 TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy, 12444 FirstStoreAS, FirstStoreAlign, &IsFast) && 12445 IsFast) { 12446 LastLegalType = i + 1; 12447 } 12448 } 12449 12450 // We only use vectors if the constant is known to be zero or the target 12451 // allows it and the function is not marked with the noimplicitfloat 12452 // attribute. 12453 if ((!NonZero || 12454 TLI.storeOfVectorConstantIsCheap(MemVT, i + 1, FirstStoreAS)) && 12455 !NoVectors) { 12456 // Find a legal type for the vector store. 12457 EVT Ty = EVT::getVectorVT(Context, MemVT, i + 1); 12458 if (TLI.isTypeLegal(Ty) && TLI.canMergeStoresTo(Ty) && 12459 TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS, 12460 FirstStoreAlign, &IsFast) && 12461 IsFast) 12462 LastLegalVectorType = i + 1; 12463 } 12464 } 12465 12466 // Check if we found a legal integer type that creates a meaningful merge. 12467 if (LastLegalType < 2 && LastLegalVectorType < 2) 12468 break; 12469 12470 bool UseVector = (LastLegalVectorType > LastLegalType) && !NoVectors; 12471 unsigned NumElem = (UseVector) ? 
LastLegalVectorType : LastLegalType; 12472 12473 bool Merged = MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumElem, 12474 true, UseVector); 12475 if (!Merged) 12476 break; 12477 // Remove merged stores for next iteration. 12478 StoreNodes.erase(StoreNodes.begin(), StoreNodes.begin() + NumElem); 12479 RV = true; 12480 NumConsecutiveStores -= NumElem; 12481 } 12482 return RV; 12483 } 12484 12485 // When extracting multiple vector elements, try to store them 12486 // in one vector store rather than a sequence of scalar stores. 12487 if (IsExtractVecSrc) { 12488 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode; 12489 unsigned FirstStoreAS = FirstInChain->getAddressSpace(); 12490 unsigned FirstStoreAlign = FirstInChain->getAlignment(); 12491 unsigned NumStoresToMerge = 0; 12492 bool IsVec = MemVT.isVector(); 12493 for (unsigned i = 0; i < NumConsecutiveStores; ++i) { 12494 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); 12495 unsigned StoreValOpcode = St->getValue().getOpcode(); 12496 // This restriction could be loosened. 12497 // Bail out if any stored values are not elements extracted from a vector. 12498 // It should be possible to handle mixed sources, but load sources need 12499 // more careful handling (see the block of code below that handles 12500 // consecutive loads). 12501 if (StoreValOpcode != ISD::EXTRACT_VECTOR_ELT && 12502 StoreValOpcode != ISD::EXTRACT_SUBVECTOR) 12503 return false; 12504 12505 // Find a legal type for the vector store. 12506 unsigned Elts = i + 1; 12507 if (IsVec) { 12508 // When merging vector stores, get the total number of elements. 12509 Elts *= MemVT.getVectorNumElements(); 12510 } 12511 EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(), Elts); 12512 bool IsFast; 12513 if (TLI.isTypeLegal(Ty) && 12514 TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS, 12515 FirstStoreAlign, &IsFast) && IsFast) 12516 NumStoresToMerge = i + 1; 12517 } 12518 12519 return MergeStoresOfConstantsOrVecElts(StoreNodes, MemVT, NumStoresToMerge, 12520 false, true); 12521 } 12522 12523 // Below we handle the case of multiple consecutive stores that 12524 // come from multiple consecutive loads. We merge them into a single 12525 // wide load and a single wide store. 12526 12527 // Look for load nodes which are used by the stored values. 12528 SmallVector<MemOpLink, 8> LoadNodes; 12529 12530 // Find acceptable loads. Loads need to have the same chain (token factor), 12531 // must not be zext, volatile, indexed, and they must be consecutive. 12532 BaseIndexOffset LdBasePtr; 12533 for (unsigned i = 0; i < NumConsecutiveStores; ++i) { 12534 StoreSDNode *St = cast<StoreSDNode>(StoreNodes[i].MemNode); 12535 LoadSDNode *Ld = dyn_cast<LoadSDNode>(St->getValue()); 12536 if (!Ld) break; 12537 12538 // Loads must only have one use. 12539 if (!Ld->hasNUsesOfValue(1, 0)) 12540 break; 12541 12542 // The memory operands must not be volatile. 12543 if (Ld->isVolatile() || Ld->isIndexed()) 12544 break; 12545 12546 // We do not accept ext loads. 12547 if (Ld->getExtensionType() != ISD::NON_EXTLOAD) 12548 break; 12549 12550 // The stored memory type must be the same. 12551 if (Ld->getMemoryVT() != MemVT) 12552 break; 12553 12554 BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr(), DAG); 12555 // If this is not the first ptr that we check. 12556 if (LdBasePtr.Base.getNode()) { 12557 // The base ptr must be the same. 
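      // (Only the constant offset from that base may differ between the loads.)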
12558 if (!LdPtr.equalBaseIndex(LdBasePtr)) 12559 break; 12560 } else { 12561 // Check that all other base pointers are the same as this one. 12562 LdBasePtr = LdPtr; 12563 } 12564 12565 // We found a potential memory operand to merge. 12566 LoadNodes.push_back(MemOpLink(Ld, LdPtr.Offset)); 12567 } 12568 12569 if (LoadNodes.size() < 2) 12570 return false; 12571 12572 // If we have load/store pair instructions and we only have two values, 12573 // don't bother. 12574 unsigned RequiredAlignment; 12575 if (LoadNodes.size() == 2 && TLI.hasPairedLoad(MemVT, RequiredAlignment) && 12576 St->getAlignment() >= RequiredAlignment) 12577 return false; 12578 LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode; 12579 unsigned FirstStoreAS = FirstInChain->getAddressSpace(); 12580 unsigned FirstStoreAlign = FirstInChain->getAlignment(); 12581 LoadSDNode *FirstLoad = cast<LoadSDNode>(LoadNodes[0].MemNode); 12582 unsigned FirstLoadAS = FirstLoad->getAddressSpace(); 12583 unsigned FirstLoadAlign = FirstLoad->getAlignment(); 12584 12585 // Scan the memory operations on the chain and find the first non-consecutive 12586 // load memory address. These variables hold the index in the store node 12587 // array. 12588 unsigned LastConsecutiveLoad = 0; 12589 // This variable refers to the size and not index in the array. 12590 unsigned LastLegalVectorType = 0; 12591 unsigned LastLegalIntegerType = 0; 12592 StartAddress = LoadNodes[0].OffsetFromBase; 12593 SDValue FirstChain = FirstLoad->getChain(); 12594 for (unsigned i = 1; i < LoadNodes.size(); ++i) { 12595 // All loads must share the same chain. 12596 if (LoadNodes[i].MemNode->getChain() != FirstChain) 12597 break; 12598 12599 int64_t CurrAddress = LoadNodes[i].OffsetFromBase; 12600 if (CurrAddress - StartAddress != (ElementSizeBytes * i)) 12601 break; 12602 LastConsecutiveLoad = i; 12603 // Find a legal type for the vector store. 12604 EVT StoreTy = EVT::getVectorVT(Context, MemVT, i+1); 12605 bool IsFastSt, IsFastLd; 12606 if (TLI.isTypeLegal(StoreTy) && 12607 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS, 12608 FirstStoreAlign, &IsFastSt) && IsFastSt && 12609 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS, 12610 FirstLoadAlign, &IsFastLd) && IsFastLd) { 12611 LastLegalVectorType = i + 1; 12612 } 12613 12614 // Find a legal type for the integer store. 12615 unsigned SizeInBits = (i+1) * ElementSizeBytes * 8; 12616 StoreTy = EVT::getIntegerVT(Context, SizeInBits); 12617 if (TLI.isTypeLegal(StoreTy) && 12618 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS, 12619 FirstStoreAlign, &IsFastSt) && IsFastSt && 12620 TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS, 12621 FirstLoadAlign, &IsFastLd) && IsFastLd) 12622 LastLegalIntegerType = i + 1; 12623 // Or check whether a truncstore and extload is legal. 
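      // That is, the promoted integer type must support a truncating store as
      // well as zero-, sign-, and any-extending loads at the required alignment.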
12624 else if (TLI.getTypeAction(Context, StoreTy) == 12625 TargetLowering::TypePromoteInteger) { 12626 EVT LegalizedStoredValueTy = 12627 TLI.getTypeToTransformTo(Context, StoreTy); 12628 if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) && 12629 TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValueTy, StoreTy) && 12630 TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValueTy, StoreTy) && 12631 TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValueTy, StoreTy) && 12632 TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy, 12633 FirstStoreAS, FirstStoreAlign, &IsFastSt) && 12634 IsFastSt && 12635 TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy, 12636 FirstLoadAS, FirstLoadAlign, &IsFastLd) && 12637 IsFastLd) 12638 LastLegalIntegerType = i+1; 12639 } 12640 } 12641 12642 // Only use vector types if the vector type is larger than the integer type. 12643 // If they are the same, use integers. 12644 bool UseVectorTy = LastLegalVectorType > LastLegalIntegerType && !NoVectors; 12645 unsigned LastLegalType = std::max(LastLegalVectorType, LastLegalIntegerType); 12646 12647 // We add +1 here because the LastXXX variables refer to location while 12648 // the NumElem refers to array/index size. 12649 unsigned NumElem = std::min(NumConsecutiveStores, LastConsecutiveLoad + 1); 12650 NumElem = std::min(LastLegalType, NumElem); 12651 12652 if (NumElem < 2) 12653 return false; 12654 12655 // Find if it is better to use vectors or integers to load and store 12656 // to memory. 12657 EVT JointMemOpVT; 12658 if (UseVectorTy) { 12659 JointMemOpVT = EVT::getVectorVT(Context, MemVT, NumElem); 12660 } else { 12661 unsigned SizeInBits = NumElem * ElementSizeBytes * 8; 12662 JointMemOpVT = EVT::getIntegerVT(Context, SizeInBits); 12663 } 12664 12665 SDLoc LoadDL(LoadNodes[0].MemNode); 12666 SDLoc StoreDL(StoreNodes[0].MemNode); 12667 12668 // The merged loads are required to have the same incoming chain, so 12669 // using the first's chain is acceptable. 12670 SDValue NewLoad = DAG.getLoad(JointMemOpVT, LoadDL, FirstLoad->getChain(), 12671 FirstLoad->getBasePtr(), 12672 FirstLoad->getPointerInfo(), FirstLoadAlign); 12673 12674 SDValue NewStoreChain = getMergeStoreChains(StoreNodes, NumElem); 12675 12676 AddToWorklist(NewStoreChain.getNode()); 12677 12678 SDValue NewStore = 12679 DAG.getStore(NewStoreChain, StoreDL, NewLoad, FirstInChain->getBasePtr(), 12680 FirstInChain->getPointerInfo(), FirstStoreAlign); 12681 12682 // Transfer chain users from old loads to the new load. 12683 for (unsigned i = 0; i < NumElem; ++i) { 12684 LoadSDNode *Ld = cast<LoadSDNode>(LoadNodes[i].MemNode); 12685 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), 12686 SDValue(NewLoad.getNode(), 1)); 12687 } 12688 12689 // Replace the all stores with the new store. 12690 for (unsigned i = 0; i < NumElem; ++i) 12691 CombineTo(StoreNodes[i].MemNode, NewStore); 12692 return true; 12693 } 12694 12695 SDValue DAGCombiner::replaceStoreChain(StoreSDNode *ST, SDValue BetterChain) { 12696 SDLoc SL(ST); 12697 SDValue ReplStore; 12698 12699 // Replace the chain to avoid dependency. 12700 if (ST->isTruncatingStore()) { 12701 ReplStore = DAG.getTruncStore(BetterChain, SL, ST->getValue(), 12702 ST->getBasePtr(), ST->getMemoryVT(), 12703 ST->getMemOperand()); 12704 } else { 12705 ReplStore = DAG.getStore(BetterChain, SL, ST->getValue(), ST->getBasePtr(), 12706 ST->getMemOperand()); 12707 } 12708 12709 // Create token to keep both nodes around. 
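  // The TokenFactor ties the original chain and the rechained store together,
  // so existing users of the old store's chain result still see both sides.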
12710   SDValue Token = DAG.getNode(ISD::TokenFactor, SL,
12711                               MVT::Other, ST->getChain(), ReplStore);
12712
12713   // Make sure the new and old chains are cleaned up.
12714   AddToWorklist(Token.getNode());
12715
12716   // Don't add users to work list.
12717   return CombineTo(ST, Token, false);
12718 }
12719
12720 SDValue DAGCombiner::replaceStoreOfFPConstant(StoreSDNode *ST) {
12721   SDValue Value = ST->getValue();
12722   if (Value.getOpcode() == ISD::TargetConstantFP)
12723     return SDValue();
12724
12725   SDLoc DL(ST);
12726
12727   SDValue Chain = ST->getChain();
12728   SDValue Ptr = ST->getBasePtr();
12729
12730   const ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Value);
12731
12732   // NOTE: If the original store is volatile, this transform must not increase
12733   // the number of stores. For example, on x86-32 an f64 can be stored in one
12734   // processor operation but an i64 (which is not legal) requires two. So the
12735   // transform should not be done in this case.
12736
12737   SDValue Tmp;
12738   switch (CFP->getSimpleValueType(0).SimpleTy) {
12739   default:
12740     llvm_unreachable("Unknown FP type");
12741   case MVT::f16:    // We don't do this for these yet.
12742   case MVT::f80:
12743   case MVT::f128:
12744   case MVT::ppcf128:
12745     return SDValue();
12746   case MVT::f32:
12747     if ((isTypeLegal(MVT::i32) && !LegalOperations && !ST->isVolatile()) ||
12748         TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
12749
12750       Tmp = DAG.getConstant((uint32_t)CFP->getValueAPF().
12751                             bitcastToAPInt().getZExtValue(), SDLoc(CFP),
12752                             MVT::i32);
12753       return DAG.getStore(Chain, DL, Tmp, Ptr, ST->getMemOperand());
12754     }
12755
12756     return SDValue();
12757   case MVT::f64:
12758     if ((TLI.isTypeLegal(MVT::i64) && !LegalOperations &&
12759          !ST->isVolatile()) ||
12760         TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i64)) {
12761
12762       Tmp = DAG.getConstant(CFP->getValueAPF().bitcastToAPInt().
12763                             getZExtValue(), SDLoc(CFP), MVT::i64);
12764       return DAG.getStore(Chain, DL, Tmp,
12765                           Ptr, ST->getMemOperand());
12766     }
12767
12768     if (!ST->isVolatile() &&
12769         TLI.isOperationLegalOrCustom(ISD::STORE, MVT::i32)) {
12770       // Many FP stores are not made apparent until after legalize, e.g. for
12771       // argument passing. Since this is so common, custom legalize the
12772       // 64-bit integer store into two 32-bit stores.
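      // For example, an f64 store of 1.0 (bit pattern 0x3FF0000000000000) becomes
      // an i32 store of 0x00000000 at Ptr and an i32 store of 0x3FF00000 at Ptr+4,
      // with the two halves swapped on big-endian targets.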
12773 uint64_t Val = CFP->getValueAPF().bitcastToAPInt().getZExtValue(); 12774 SDValue Lo = DAG.getConstant(Val & 0xFFFFFFFF, SDLoc(CFP), MVT::i32); 12775 SDValue Hi = DAG.getConstant(Val >> 32, SDLoc(CFP), MVT::i32); 12776 if (DAG.getDataLayout().isBigEndian()) 12777 std::swap(Lo, Hi); 12778 12779 unsigned Alignment = ST->getAlignment(); 12780 MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags(); 12781 AAMDNodes AAInfo = ST->getAAInfo(); 12782 12783 SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(), 12784 ST->getAlignment(), MMOFlags, AAInfo); 12785 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, 12786 DAG.getConstant(4, DL, Ptr.getValueType())); 12787 Alignment = MinAlign(Alignment, 4U); 12788 SDValue St1 = DAG.getStore(Chain, DL, Hi, Ptr, 12789 ST->getPointerInfo().getWithOffset(4), 12790 Alignment, MMOFlags, AAInfo); 12791 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, 12792 St0, St1); 12793 } 12794 12795 return SDValue(); 12796 } 12797 } 12798 12799 SDValue DAGCombiner::visitSTORE(SDNode *N) { 12800 StoreSDNode *ST = cast<StoreSDNode>(N); 12801 SDValue Chain = ST->getChain(); 12802 SDValue Value = ST->getValue(); 12803 SDValue Ptr = ST->getBasePtr(); 12804 12805 // If this is a store of a bit convert, store the input value if the 12806 // resultant store does not need a higher alignment than the original. 12807 if (Value.getOpcode() == ISD::BITCAST && !ST->isTruncatingStore() && 12808 ST->isUnindexed()) { 12809 EVT SVT = Value.getOperand(0).getValueType(); 12810 if (((!LegalOperations && !ST->isVolatile()) || 12811 TLI.isOperationLegalOrCustom(ISD::STORE, SVT)) && 12812 TLI.isStoreBitCastBeneficial(Value.getValueType(), SVT)) { 12813 unsigned OrigAlign = ST->getAlignment(); 12814 bool Fast = false; 12815 if (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), SVT, 12816 ST->getAddressSpace(), OrigAlign, &Fast) && 12817 Fast) { 12818 return DAG.getStore(Chain, SDLoc(N), Value.getOperand(0), Ptr, 12819 ST->getPointerInfo(), OrigAlign, 12820 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 12821 } 12822 } 12823 } 12824 12825 // Turn 'store undef, Ptr' -> nothing. 12826 if (Value.isUndef() && ST->isUnindexed()) 12827 return Chain; 12828 12829 // Try to infer better alignment information than the store already has. 12830 if (OptLevel != CodeGenOpt::None && ST->isUnindexed()) { 12831 if (unsigned Align = DAG.InferPtrAlignment(Ptr)) { 12832 if (Align > ST->getAlignment()) { 12833 SDValue NewStore = 12834 DAG.getTruncStore(Chain, SDLoc(N), Value, Ptr, ST->getPointerInfo(), 12835 ST->getMemoryVT(), Align, 12836 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 12837 if (NewStore.getNode() != N) 12838 return CombineTo(ST, NewStore, true); 12839 } 12840 } 12841 } 12842 12843 // Try transforming a pair floating point load / store ops to integer 12844 // load / store ops. 12845 if (SDValue NewST = TransformFPLoadStorePair(N)) 12846 return NewST; 12847 12848 if (ST->isUnindexed()) { 12849 // Walk up chain skipping non-aliasing memory nodes, on this store and any 12850 // adjacent stores. 12851 if (findBetterNeighborChains(ST)) { 12852 // replaceStoreChain uses CombineTo, which handled all of the worklist 12853 // manipulation. Return the original node to not do anything else. 12854 return SDValue(ST, 0); 12855 } 12856 Chain = ST->getChain(); 12857 } 12858 12859 // Try transforming N to an indexed store. 
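  // (e.g. fold a pointer increment into a pre- or post-indexed store on targets
  // that support indexed addressing modes).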
12860 if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N)) 12861 return SDValue(N, 0); 12862 12863 // FIXME: is there such a thing as a truncating indexed store? 12864 if (ST->isTruncatingStore() && ST->isUnindexed() && 12865 Value.getValueType().isInteger()) { 12866 // See if we can simplify the input to this truncstore with knowledge that 12867 // only the low bits are being used. For example: 12868 // "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8" 12869 SDValue Shorter = GetDemandedBits( 12870 Value, APInt::getLowBitsSet(Value.getScalarValueSizeInBits(), 12871 ST->getMemoryVT().getScalarSizeInBits())); 12872 AddToWorklist(Value.getNode()); 12873 if (Shorter.getNode()) 12874 return DAG.getTruncStore(Chain, SDLoc(N), Shorter, 12875 Ptr, ST->getMemoryVT(), ST->getMemOperand()); 12876 12877 // Otherwise, see if we can simplify the operation with 12878 // SimplifyDemandedBits, which only works if the value has a single use. 12879 if (SimplifyDemandedBits( 12880 Value, 12881 APInt::getLowBitsSet(Value.getScalarValueSizeInBits(), 12882 ST->getMemoryVT().getScalarSizeInBits()))) { 12883 // Re-visit the store if anything changed and the store hasn't been merged 12884 // with another node (N is deleted) SimplifyDemandedBits will add Value's 12885 // node back to the worklist if necessary, but we also need to re-visit 12886 // the Store node itself. 12887 if (N->getOpcode() != ISD::DELETED_NODE) 12888 AddToWorklist(N); 12889 return SDValue(N, 0); 12890 } 12891 } 12892 12893 // If this is a load followed by a store to the same location, then the store 12894 // is dead/noop. 12895 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Value)) { 12896 if (Ld->getBasePtr() == Ptr && ST->getMemoryVT() == Ld->getMemoryVT() && 12897 ST->isUnindexed() && !ST->isVolatile() && 12898 // There can't be any side effects between the load and store, such as 12899 // a call or store. 12900 Chain.reachesChainWithoutSideEffects(SDValue(Ld, 1))) { 12901 // The store is dead, remove it. 12902 return Chain; 12903 } 12904 } 12905 12906 // If this is a store followed by a store with the same value to the same 12907 // location, then the store is dead/noop. 12908 if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) { 12909 if (ST1->getBasePtr() == Ptr && ST->getMemoryVT() == ST1->getMemoryVT() && 12910 ST1->getValue() == Value && ST->isUnindexed() && !ST->isVolatile() && 12911 ST1->isUnindexed() && !ST1->isVolatile()) { 12912 // The store is dead, remove it. 12913 return Chain; 12914 } 12915 } 12916 12917 // If this is an FP_ROUND or TRUNC followed by a store, fold this into a 12918 // truncating store. We can do this even if this is already a truncstore. 12919 if ((Value.getOpcode() == ISD::FP_ROUND || Value.getOpcode() == ISD::TRUNCATE) 12920 && Value.getNode()->hasOneUse() && ST->isUnindexed() && 12921 TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(), 12922 ST->getMemoryVT())) { 12923 return DAG.getTruncStore(Chain, SDLoc(N), Value.getOperand(0), 12924 Ptr, ST->getMemoryVT(), ST->getMemOperand()); 12925 } 12926 12927 // Only perform this optimization before the types are legal, because we 12928 // don't want to perform this optimization on every DAGCombine invocation. 12929 if (!LegalTypes) { 12930 for (;;) { 12931 // There can be multiple store sequences on the same chain. 12932 // Keep trying to merge store sequences until we are unable to do so 12933 // or until we merge the last store on the chain. 
12934       bool Changed = MergeConsecutiveStores(ST);
12935       if (!Changed) break;
12936       // Return N here, since the merge only uses CombineTo and no further
12937       // worklist clean-up is necessary.
12938       if (N->getOpcode() == ISD::DELETED_NODE || !isa<StoreSDNode>(N))
12939         return SDValue(N, 0);
12940     }
12941   }
12942
12943   // Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
12944   //
12945   // Make sure to do this only after attempting to merge stores in order to
12946   // avoid changing the types of some subset of stores due to visit order,
12947   // preventing their merging.
12948   if (isa<ConstantFPSDNode>(ST->getValue())) {
12949     if (SDValue NewSt = replaceStoreOfFPConstant(ST))
12950       return NewSt;
12951   }
12952
12953   if (SDValue NewSt = splitMergedValStore(ST))
12954     return NewSt;
12955
12956   return ReduceLoadOpStoreWidth(N);
12957 }
12958
12959 /// For the store instruction sequence below, the F and I values
12960 /// are bundled together as an i64 value before being stored into memory.
12961 /// Sometimes it is more efficient to generate separate stores for F and I,
12962 /// which can remove the bitwise instructions or sink them to colder places.
12963 ///
12964 ///   (store (or (zext (bitcast F to i32) to i64),
12965 ///              (shl (zext I to i64), 32)), addr)  -->
12966 ///   (store F, addr) and (store I, addr+4)
12967 ///
12968 /// Similarly, splitting other merged stores can also be beneficial, e.g.:
12969 /// For pair of {i32, i32}, i64 store --> two i32 stores.
12970 /// For pair of {i32, i16}, i64 store --> two i32 stores.
12971 /// For pair of {i16, i16}, i32 store --> two i16 stores.
12972 /// For pair of {i16, i8},  i32 store --> two i16 stores.
12973 /// For pair of {i8, i8},   i16 store --> two i8 stores.
12974 ///
12975 /// We allow each target to determine specifically which kind of splitting is
12976 /// supported.
12977 ///
12978 /// The store patterns are commonly seen from the simple code snippet below
12979 /// if only std::make_pair(...) is SROA-transformed before being inlined into hoo.
12980 ///   void goo(const std::pair<int, float> &);
12981 ///   hoo() {
12982 ///     ...
12983 ///     goo(std::make_pair(tmp, ftmp));
12984 ///     ...
12985 ///   }
12986 ///
12987 SDValue DAGCombiner::splitMergedValStore(StoreSDNode *ST) {
12988   if (OptLevel == CodeGenOpt::None)
12989     return SDValue();
12990
12991   SDValue Val = ST->getValue();
12992   SDLoc DL(ST);
12993
12994   // Match the OR operand.
12995   if (!Val.getValueType().isScalarInteger() || Val.getOpcode() != ISD::OR)
12996     return SDValue();
12997
12998   // Match the SHL operand and get the lower and higher parts of Val.
12999   SDValue Op1 = Val.getOperand(0);
13000   SDValue Op2 = Val.getOperand(1);
13001   SDValue Lo, Hi;
13002   if (Op1.getOpcode() != ISD::SHL) {
13003     std::swap(Op1, Op2);
13004     if (Op1.getOpcode() != ISD::SHL)
13005       return SDValue();
13006   }
13007   Lo = Op2;
13008   Hi = Op1.getOperand(0);
13009   if (!Op1.hasOneUse())
13010     return SDValue();
13011
13012   // Match the shift amount to HalfValBitSize.
13013   unsigned HalfValBitSize = Val.getValueSizeInBits() / 2;
13014   ConstantSDNode *ShAmt = dyn_cast<ConstantSDNode>(Op1.getOperand(1));
13015   if (!ShAmt || ShAmt->getAPIntValue() != HalfValBitSize)
13016     return SDValue();
13017
13018   // Lo and Hi must be zero-extended from integers whose size is at most half
13019   // of Val's size (e.g. from i32 to i64).
13020 if (Lo.getOpcode() != ISD::ZERO_EXTEND || !Lo.hasOneUse() || 13021 !Lo.getOperand(0).getValueType().isScalarInteger() || 13022 Lo.getOperand(0).getValueSizeInBits() > HalfValBitSize || 13023 Hi.getOpcode() != ISD::ZERO_EXTEND || !Hi.hasOneUse() || 13024 !Hi.getOperand(0).getValueType().isScalarInteger() || 13025 Hi.getOperand(0).getValueSizeInBits() > HalfValBitSize) 13026 return SDValue(); 13027 13028 // Use the EVT of low and high parts before bitcast as the input 13029 // of target query. 13030 EVT LowTy = (Lo.getOperand(0).getOpcode() == ISD::BITCAST) 13031 ? Lo.getOperand(0).getValueType() 13032 : Lo.getValueType(); 13033 EVT HighTy = (Hi.getOperand(0).getOpcode() == ISD::BITCAST) 13034 ? Hi.getOperand(0).getValueType() 13035 : Hi.getValueType(); 13036 if (!TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy)) 13037 return SDValue(); 13038 13039 // Start to split store. 13040 unsigned Alignment = ST->getAlignment(); 13041 MachineMemOperand::Flags MMOFlags = ST->getMemOperand()->getFlags(); 13042 AAMDNodes AAInfo = ST->getAAInfo(); 13043 13044 // Change the sizes of Lo and Hi's value types to HalfValBitSize. 13045 EVT VT = EVT::getIntegerVT(*DAG.getContext(), HalfValBitSize); 13046 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Lo.getOperand(0)); 13047 Hi = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Hi.getOperand(0)); 13048 13049 SDValue Chain = ST->getChain(); 13050 SDValue Ptr = ST->getBasePtr(); 13051 // Lower value store. 13052 SDValue St0 = DAG.getStore(Chain, DL, Lo, Ptr, ST->getPointerInfo(), 13053 ST->getAlignment(), MMOFlags, AAInfo); 13054 Ptr = 13055 DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr, 13056 DAG.getConstant(HalfValBitSize / 8, DL, Ptr.getValueType())); 13057 // Higher value store. 13058 SDValue St1 = 13059 DAG.getStore(St0, DL, Hi, Ptr, 13060 ST->getPointerInfo().getWithOffset(HalfValBitSize / 8), 13061 Alignment / 2, MMOFlags, AAInfo); 13062 return St1; 13063 } 13064 13065 SDValue DAGCombiner::visitINSERT_VECTOR_ELT(SDNode *N) { 13066 SDValue InVec = N->getOperand(0); 13067 SDValue InVal = N->getOperand(1); 13068 SDValue EltNo = N->getOperand(2); 13069 SDLoc DL(N); 13070 13071 // If the inserted element is an UNDEF, just use the input vector. 13072 if (InVal.isUndef()) 13073 return InVec; 13074 13075 EVT VT = InVec.getValueType(); 13076 13077 // Check that we know which element is being inserted 13078 if (!isa<ConstantSDNode>(EltNo)) 13079 return SDValue(); 13080 unsigned Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 13081 13082 // Canonicalize insert_vector_elt dag nodes. 13083 // Example: 13084 // (insert_vector_elt (insert_vector_elt A, Idx0), Idx1) 13085 // -> (insert_vector_elt (insert_vector_elt A, Idx1), Idx0) 13086 // 13087 // Do this only if the child insert_vector node has one use; also 13088 // do this only if indices are both constants and Idx1 < Idx0. 13089 if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && InVec.hasOneUse() 13090 && isa<ConstantSDNode>(InVec.getOperand(2))) { 13091 unsigned OtherElt = 13092 cast<ConstantSDNode>(InVec.getOperand(2))->getZExtValue(); 13093 if (Elt < OtherElt) { 13094 // Swap nodes. 
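      // Insert the current element (the one with the smaller index) into the
      // base vector first, then re-apply the original inner insert on top of it.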
13095 SDValue NewOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, 13096 InVec.getOperand(0), InVal, EltNo); 13097 AddToWorklist(NewOp.getNode()); 13098 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(InVec.getNode()), 13099 VT, NewOp, InVec.getOperand(1), InVec.getOperand(2)); 13100 } 13101 } 13102 13103 // If we can't generate a legal BUILD_VECTOR, exit 13104 if (LegalOperations && !TLI.isOperationLegal(ISD::BUILD_VECTOR, VT)) 13105 return SDValue(); 13106 13107 // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially 13108 // be converted to a BUILD_VECTOR). Fill in the Ops vector with the 13109 // vector elements. 13110 SmallVector<SDValue, 8> Ops; 13111 // Do not combine these two vectors if the output vector will not replace 13112 // the input vector. 13113 if (InVec.getOpcode() == ISD::BUILD_VECTOR && InVec.hasOneUse()) { 13114 Ops.append(InVec.getNode()->op_begin(), 13115 InVec.getNode()->op_end()); 13116 } else if (InVec.isUndef()) { 13117 unsigned NElts = VT.getVectorNumElements(); 13118 Ops.append(NElts, DAG.getUNDEF(InVal.getValueType())); 13119 } else { 13120 return SDValue(); 13121 } 13122 13123 // Insert the element 13124 if (Elt < Ops.size()) { 13125 // All the operands of BUILD_VECTOR must have the same type; 13126 // we enforce that here. 13127 EVT OpVT = Ops[0].getValueType(); 13128 Ops[Elt] = OpVT.isInteger() ? DAG.getAnyExtOrTrunc(InVal, DL, OpVT) : InVal; 13129 } 13130 13131 // Return the new vector 13132 return DAG.getBuildVector(VT, DL, Ops); 13133 } 13134 13135 SDValue DAGCombiner::ReplaceExtractVectorEltOfLoadWithNarrowedLoad( 13136 SDNode *EVE, EVT InVecVT, SDValue EltNo, LoadSDNode *OriginalLoad) { 13137 assert(!OriginalLoad->isVolatile()); 13138 13139 EVT ResultVT = EVE->getValueType(0); 13140 EVT VecEltVT = InVecVT.getVectorElementType(); 13141 unsigned Align = OriginalLoad->getAlignment(); 13142 unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment( 13143 VecEltVT.getTypeForEVT(*DAG.getContext())); 13144 13145 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, VecEltVT)) 13146 return SDValue(); 13147 13148 ISD::LoadExtType ExtTy = ResultVT.bitsGT(VecEltVT) ? 13149 ISD::NON_EXTLOAD : ISD::EXTLOAD; 13150 if (!TLI.shouldReduceLoadWidth(OriginalLoad, ExtTy, VecEltVT)) 13151 return SDValue(); 13152 13153 Align = NewAlign; 13154 13155 SDValue NewPtr = OriginalLoad->getBasePtr(); 13156 SDValue Offset; 13157 EVT PtrType = NewPtr.getValueType(); 13158 MachinePointerInfo MPI; 13159 SDLoc DL(EVE); 13160 if (auto *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo)) { 13161 int Elt = ConstEltNo->getZExtValue(); 13162 unsigned PtrOff = VecEltVT.getSizeInBits() * Elt / 8; 13163 Offset = DAG.getConstant(PtrOff, DL, PtrType); 13164 MPI = OriginalLoad->getPointerInfo().getWithOffset(PtrOff); 13165 } else { 13166 Offset = DAG.getZExtOrTrunc(EltNo, DL, PtrType); 13167 Offset = DAG.getNode( 13168 ISD::MUL, DL, PtrType, Offset, 13169 DAG.getConstant(VecEltVT.getStoreSize(), DL, PtrType)); 13170 MPI = OriginalLoad->getPointerInfo(); 13171 } 13172 NewPtr = DAG.getNode(ISD::ADD, DL, PtrType, NewPtr, Offset); 13173 13174 // The replacement we need to do here is a little tricky: we need to 13175 // replace an extractelement of a load with a load. 13176 // Use ReplaceAllUsesOfValuesWith to do the replacement. 13177 // Note that this replacement assumes that the extractvalue is the only 13178 // use of the load; that's okay because we don't want to perform this 13179 // transformation in other cases anyway. 
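  // Two cases follow: if the scalar result is wider than the vector element we
  // emit an extending load; otherwise we load the element type and then
  // truncate or bitcast it to the result type.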
13180 SDValue Load; 13181 SDValue Chain; 13182 if (ResultVT.bitsGT(VecEltVT)) { 13183 // If the result type of vextract is wider than the load, then issue an 13184 // extending load instead. 13185 ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, ResultVT, 13186 VecEltVT) 13187 ? ISD::ZEXTLOAD 13188 : ISD::EXTLOAD; 13189 Load = DAG.getExtLoad(ExtType, SDLoc(EVE), ResultVT, 13190 OriginalLoad->getChain(), NewPtr, MPI, VecEltVT, 13191 Align, OriginalLoad->getMemOperand()->getFlags(), 13192 OriginalLoad->getAAInfo()); 13193 Chain = Load.getValue(1); 13194 } else { 13195 Load = DAG.getLoad(VecEltVT, SDLoc(EVE), OriginalLoad->getChain(), NewPtr, 13196 MPI, Align, OriginalLoad->getMemOperand()->getFlags(), 13197 OriginalLoad->getAAInfo()); 13198 Chain = Load.getValue(1); 13199 if (ResultVT.bitsLT(VecEltVT)) 13200 Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load); 13201 else 13202 Load = DAG.getBitcast(ResultVT, Load); 13203 } 13204 WorklistRemover DeadNodes(*this); 13205 SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) }; 13206 SDValue To[] = { Load, Chain }; 13207 DAG.ReplaceAllUsesOfValuesWith(From, To, 2); 13208 // Since we're explicitly calling ReplaceAllUses, add the new node to the 13209 // worklist explicitly as well. 13210 AddToWorklist(Load.getNode()); 13211 AddUsersToWorklist(Load.getNode()); // Add users too 13212 // Make sure to revisit this node to clean it up; it will usually be dead. 13213 AddToWorklist(EVE); 13214 ++OpsNarrowed; 13215 return SDValue(EVE, 0); 13216 } 13217 13218 SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) { 13219 // (vextract (scalar_to_vector val, 0) -> val 13220 SDValue InVec = N->getOperand(0); 13221 EVT VT = InVec.getValueType(); 13222 EVT NVT = N->getValueType(0); 13223 13224 if (InVec.isUndef()) 13225 return DAG.getUNDEF(NVT); 13226 13227 if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR) { 13228 // Check if the result type doesn't match the inserted element type. A 13229 // SCALAR_TO_VECTOR may truncate the inserted element and the 13230 // EXTRACT_VECTOR_ELT may widen the extracted vector. 13231 SDValue InOp = InVec.getOperand(0); 13232 if (InOp.getValueType() != NVT) { 13233 assert(InOp.getValueType().isInteger() && NVT.isInteger()); 13234 return DAG.getSExtOrTrunc(InOp, SDLoc(InVec), NVT); 13235 } 13236 return InOp; 13237 } 13238 13239 SDValue EltNo = N->getOperand(1); 13240 ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo); 13241 13242 // extract_vector_elt (build_vector x, y), 1 -> y 13243 if (ConstEltNo && 13244 InVec.getOpcode() == ISD::BUILD_VECTOR && 13245 TLI.isTypeLegal(VT) && 13246 (InVec.hasOneUse() || 13247 TLI.aggressivelyPreferBuildVectorSources(VT))) { 13248 SDValue Elt = InVec.getOperand(ConstEltNo->getZExtValue()); 13249 EVT InEltVT = Elt.getValueType(); 13250 13251 // Sometimes build_vector's scalar input types do not match result type. 13252 if (NVT == InEltVT) 13253 return Elt; 13254 13255 // TODO: It may be useful to truncate if free if the build_vector implicitly 13256 // converts. 
13257 } 13258 13259 // extract_vector_elt (v2i32 (bitcast i64:x)), 0 -> i32 (trunc i64:x) 13260 if (ConstEltNo && InVec.getOpcode() == ISD::BITCAST && InVec.hasOneUse() && 13261 ConstEltNo->isNullValue() && VT.isInteger()) { 13262 SDValue BCSrc = InVec.getOperand(0); 13263 if (BCSrc.getValueType().isScalarInteger()) 13264 return DAG.getNode(ISD::TRUNCATE, SDLoc(N), NVT, BCSrc); 13265 } 13266 13267 // extract_vector_elt (insert_vector_elt vec, val, idx), idx) -> val 13268 // 13269 // This only really matters if the index is non-constant since other combines 13270 // on the constant elements already work. 13271 if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT && 13272 EltNo == InVec.getOperand(2)) { 13273 SDValue Elt = InVec.getOperand(1); 13274 return VT.isInteger() ? DAG.getAnyExtOrTrunc(Elt, SDLoc(N), NVT) : Elt; 13275 } 13276 13277 // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT. 13278 // We only perform this optimization before the op legalization phase because 13279 // we may introduce new vector instructions which are not backed by TD 13280 // patterns. For example on AVX, extracting elements from a wide vector 13281 // without using extract_subvector. However, if we can find an underlying 13282 // scalar value, then we can always use that. 13283 if (ConstEltNo && InVec.getOpcode() == ISD::VECTOR_SHUFFLE) { 13284 int NumElem = VT.getVectorNumElements(); 13285 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec); 13286 // Find the new index to extract from. 13287 int OrigElt = SVOp->getMaskElt(ConstEltNo->getZExtValue()); 13288 13289 // Extracting an undef index is undef. 13290 if (OrigElt == -1) 13291 return DAG.getUNDEF(NVT); 13292 13293 // Select the right vector half to extract from. 13294 SDValue SVInVec; 13295 if (OrigElt < NumElem) { 13296 SVInVec = InVec->getOperand(0); 13297 } else { 13298 SVInVec = InVec->getOperand(1); 13299 OrigElt -= NumElem; 13300 } 13301 13302 if (SVInVec.getOpcode() == ISD::BUILD_VECTOR) { 13303 SDValue InOp = SVInVec.getOperand(OrigElt); 13304 if (InOp.getValueType() != NVT) { 13305 assert(InOp.getValueType().isInteger() && NVT.isInteger()); 13306 InOp = DAG.getSExtOrTrunc(InOp, SDLoc(SVInVec), NVT); 13307 } 13308 13309 return InOp; 13310 } 13311 13312 // FIXME: We should handle recursing on other vector shuffles and 13313 // scalar_to_vector here as well. 13314 13315 if (!LegalOperations) { 13316 EVT IndexTy = TLI.getVectorIdxTy(DAG.getDataLayout()); 13317 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), NVT, SVInVec, 13318 DAG.getConstant(OrigElt, SDLoc(SVOp), IndexTy)); 13319 } 13320 } 13321 13322 bool BCNumEltsChanged = false; 13323 EVT ExtVT = VT.getVectorElementType(); 13324 EVT LVT = ExtVT; 13325 13326 // If the result of load has to be truncated, then it's not necessarily 13327 // profitable. 13328 if (NVT.bitsLT(LVT) && !TLI.isTruncateFree(LVT, NVT)) 13329 return SDValue(); 13330 13331 if (InVec.getOpcode() == ISD::BITCAST) { 13332 // Don't duplicate a load with other uses. 
13333 if (!InVec.hasOneUse()) 13334 return SDValue(); 13335 13336 EVT BCVT = InVec.getOperand(0).getValueType(); 13337 if (!BCVT.isVector() || ExtVT.bitsGT(BCVT.getVectorElementType())) 13338 return SDValue(); 13339 if (VT.getVectorNumElements() != BCVT.getVectorNumElements()) 13340 BCNumEltsChanged = true; 13341 InVec = InVec.getOperand(0); 13342 ExtVT = BCVT.getVectorElementType(); 13343 } 13344 13345 // (vextract (vN[if]M load $addr), i) -> ([if]M load $addr + i * size) 13346 if (!LegalOperations && !ConstEltNo && InVec.hasOneUse() && 13347 ISD::isNormalLoad(InVec.getNode()) && 13348 !N->getOperand(1)->hasPredecessor(InVec.getNode())) { 13349 SDValue Index = N->getOperand(1); 13350 if (LoadSDNode *OrigLoad = dyn_cast<LoadSDNode>(InVec)) { 13351 if (!OrigLoad->isVolatile()) { 13352 return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, Index, 13353 OrigLoad); 13354 } 13355 } 13356 } 13357 13358 // Perform only after legalization to ensure build_vector / vector_shuffle 13359 // optimizations have already been done. 13360 if (!LegalOperations) return SDValue(); 13361 13362 // (vextract (v4f32 load $addr), c) -> (f32 load $addr+c*size) 13363 // (vextract (v4f32 s2v (f32 load $addr)), c) -> (f32 load $addr+c*size) 13364 // (vextract (v4f32 shuffle (load $addr), <1,u,u,u>), 0) -> (f32 load $addr) 13365 13366 if (ConstEltNo) { 13367 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue(); 13368 13369 LoadSDNode *LN0 = nullptr; 13370 const ShuffleVectorSDNode *SVN = nullptr; 13371 if (ISD::isNormalLoad(InVec.getNode())) { 13372 LN0 = cast<LoadSDNode>(InVec); 13373 } else if (InVec.getOpcode() == ISD::SCALAR_TO_VECTOR && 13374 InVec.getOperand(0).getValueType() == ExtVT && 13375 ISD::isNormalLoad(InVec.getOperand(0).getNode())) { 13376 // Don't duplicate a load with other uses. 13377 if (!InVec.hasOneUse()) 13378 return SDValue(); 13379 13380 LN0 = cast<LoadSDNode>(InVec.getOperand(0)); 13381 } else if ((SVN = dyn_cast<ShuffleVectorSDNode>(InVec))) { 13382 // (vextract (vector_shuffle (load $addr), v2, <1, u, u, u>), 1) 13383 // => 13384 // (load $addr+1*size) 13385 13386 // Don't duplicate a load with other uses. 13387 if (!InVec.hasOneUse()) 13388 return SDValue(); 13389 13390 // If the bit convert changed the number of elements, it is unsafe 13391 // to examine the mask. 13392 if (BCNumEltsChanged) 13393 return SDValue(); 13394 13395 // Select the input vector, guarding against out of range extract vector. 13396 unsigned NumElems = VT.getVectorNumElements(); 13397 int Idx = (Elt > (int)NumElems) ? -1 : SVN->getMaskElt(Elt); 13398 InVec = (Idx < (int)NumElems) ? InVec.getOperand(0) : InVec.getOperand(1); 13399 13400 if (InVec.getOpcode() == ISD::BITCAST) { 13401 // Don't duplicate a load with other uses. 13402 if (!InVec.hasOneUse()) 13403 return SDValue(); 13404 13405 InVec = InVec.getOperand(0); 13406 } 13407 if (ISD::isNormalLoad(InVec.getNode())) { 13408 LN0 = cast<LoadSDNode>(InVec); 13409 Elt = (Idx < (int)NumElems) ? Idx : Idx - (int)NumElems; 13410 EltNo = DAG.getConstant(Elt, SDLoc(EltNo), EltNo.getValueType()); 13411 } 13412 } 13413 13414 // Make sure we found a non-volatile load and the extractelement is 13415 // the only use. 13416 if (!LN0 || !LN0->hasNUsesOfValue(1,0) || LN0->isVolatile()) 13417 return SDValue(); 13418 13419 // If Idx was -1 above, Elt is going to be -1, so just return undef. 
13420 if (Elt == -1) 13421 return DAG.getUNDEF(LVT); 13422 13423 return ReplaceExtractVectorEltOfLoadWithNarrowedLoad(N, VT, EltNo, LN0); 13424 } 13425 13426 return SDValue(); 13427 } 13428 13429 // Simplify (build_vec (ext )) to (bitcast (build_vec )) 13430 SDValue DAGCombiner::reduceBuildVecExtToExtBuildVec(SDNode *N) { 13431 // We perform this optimization post type-legalization because 13432 // the type-legalizer often scalarizes integer-promoted vectors. 13433 // Performing this optimization before may create bit-casts which 13434 // will be type-legalized to complex code sequences. 13435 // We perform this optimization only before the operation legalizer because we 13436 // may introduce illegal operations. 13437 if (Level != AfterLegalizeVectorOps && Level != AfterLegalizeTypes) 13438 return SDValue(); 13439 13440 unsigned NumInScalars = N->getNumOperands(); 13441 SDLoc DL(N); 13442 EVT VT = N->getValueType(0); 13443 13444 // Check to see if this is a BUILD_VECTOR of a bunch of values 13445 // which come from any_extend or zero_extend nodes. If so, we can create 13446 // a new BUILD_VECTOR using bit-casts which may enable other BUILD_VECTOR 13447 // optimizations. We do not handle sign-extend because we can't fill the sign 13448 // using shuffles. 13449 EVT SourceType = MVT::Other; 13450 bool AllAnyExt = true; 13451 13452 for (unsigned i = 0; i != NumInScalars; ++i) { 13453 SDValue In = N->getOperand(i); 13454 // Ignore undef inputs. 13455 if (In.isUndef()) continue; 13456 13457 bool AnyExt = In.getOpcode() == ISD::ANY_EXTEND; 13458 bool ZeroExt = In.getOpcode() == ISD::ZERO_EXTEND; 13459 13460 // Abort if the element is not an extension. 13461 if (!ZeroExt && !AnyExt) { 13462 SourceType = MVT::Other; 13463 break; 13464 } 13465 13466 // The input is a ZeroExt or AnyExt. Check the original type. 13467 EVT InTy = In.getOperand(0).getValueType(); 13468 13469 // Check that all of the widened source types are the same. 13470 if (SourceType == MVT::Other) 13471 // First time. 13472 SourceType = InTy; 13473 else if (InTy != SourceType) { 13474 // Multiple income types. Abort. 13475 SourceType = MVT::Other; 13476 break; 13477 } 13478 13479 // Check if all of the extends are ANY_EXTENDs. 13480 AllAnyExt &= AnyExt; 13481 } 13482 13483 // In order to have valid types, all of the inputs must be extended from the 13484 // same source type and all of the inputs must be any or zero extend. 13485 // Scalar sizes must be a power of two. 13486 EVT OutScalarTy = VT.getScalarType(); 13487 bool ValidTypes = SourceType != MVT::Other && 13488 isPowerOf2_32(OutScalarTy.getSizeInBits()) && 13489 isPowerOf2_32(SourceType.getSizeInBits()); 13490 13491 // Create a new simpler BUILD_VECTOR sequence which other optimizations can 13492 // turn into a single shuffle instruction. 13493 if (!ValidTypes) 13494 return SDValue(); 13495 13496 bool isLE = DAG.getDataLayout().isLittleEndian(); 13497 unsigned ElemRatio = OutScalarTy.getSizeInBits()/SourceType.getSizeInBits(); 13498 assert(ElemRatio > 1 && "Invalid element size ratio"); 13499 SDValue Filler = AllAnyExt ? 
DAG.getUNDEF(SourceType): 13500 DAG.getConstant(0, DL, SourceType); 13501 13502 unsigned NewBVElems = ElemRatio * VT.getVectorNumElements(); 13503 SmallVector<SDValue, 8> Ops(NewBVElems, Filler); 13504 13505 // Populate the new build_vector 13506 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { 13507 SDValue Cast = N->getOperand(i); 13508 assert((Cast.getOpcode() == ISD::ANY_EXTEND || 13509 Cast.getOpcode() == ISD::ZERO_EXTEND || 13510 Cast.isUndef()) && "Invalid cast opcode"); 13511 SDValue In; 13512 if (Cast.isUndef()) 13513 In = DAG.getUNDEF(SourceType); 13514 else 13515 In = Cast->getOperand(0); 13516 unsigned Index = isLE ? (i * ElemRatio) : 13517 (i * ElemRatio + (ElemRatio - 1)); 13518 13519 assert(Index < Ops.size() && "Invalid index"); 13520 Ops[Index] = In; 13521 } 13522 13523 // The type of the new BUILD_VECTOR node. 13524 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SourceType, NewBVElems); 13525 assert(VecVT.getSizeInBits() == VT.getSizeInBits() && 13526 "Invalid vector size"); 13527 // Check if the new vector type is legal. 13528 if (!isTypeLegal(VecVT)) return SDValue(); 13529 13530 // Make the new BUILD_VECTOR. 13531 SDValue BV = DAG.getBuildVector(VecVT, DL, Ops); 13532 13533 // The new BUILD_VECTOR node has the potential to be further optimized. 13534 AddToWorklist(BV.getNode()); 13535 // Bitcast to the desired type. 13536 return DAG.getBitcast(VT, BV); 13537 } 13538 13539 SDValue DAGCombiner::reduceBuildVecConvertToConvertBuildVec(SDNode *N) { 13540 EVT VT = N->getValueType(0); 13541 13542 unsigned NumInScalars = N->getNumOperands(); 13543 SDLoc DL(N); 13544 13545 EVT SrcVT = MVT::Other; 13546 unsigned Opcode = ISD::DELETED_NODE; 13547 unsigned NumDefs = 0; 13548 13549 for (unsigned i = 0; i != NumInScalars; ++i) { 13550 SDValue In = N->getOperand(i); 13551 unsigned Opc = In.getOpcode(); 13552 13553 if (Opc == ISD::UNDEF) 13554 continue; 13555 13556 // If all scalar values are floats and converted from integers. 13557 if (Opcode == ISD::DELETED_NODE && 13558 (Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP)) { 13559 Opcode = Opc; 13560 } 13561 13562 if (Opc != Opcode) 13563 return SDValue(); 13564 13565 EVT InVT = In.getOperand(0).getValueType(); 13566 13567 // If all scalar values are typed differently, bail out. It's chosen to 13568 // simplify BUILD_VECTOR of integer types. 13569 if (SrcVT == MVT::Other) 13570 SrcVT = InVT; 13571 if (SrcVT != InVT) 13572 return SDValue(); 13573 NumDefs++; 13574 } 13575 13576 // If the vector has just one element defined, it's not worth to fold it into 13577 // a vectorized one. 13578 if (NumDefs < 2) 13579 return SDValue(); 13580 13581 assert((Opcode == ISD::UINT_TO_FP || Opcode == ISD::SINT_TO_FP) 13582 && "Should only handle conversion from integer to float."); 13583 assert(SrcVT != MVT::Other && "Cannot determine source type!"); 13584 13585 EVT NVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumInScalars); 13586 13587 if (!TLI.isOperationLegalOrCustom(Opcode, NVT)) 13588 return SDValue(); 13589 13590 // Just because the floating-point vector type is legal does not necessarily 13591 // mean that the corresponding integer vector type is. 
13592 if (!isTypeLegal(NVT)) 13593 return SDValue(); 13594 13595 SmallVector<SDValue, 8> Opnds; 13596 for (unsigned i = 0; i != NumInScalars; ++i) { 13597 SDValue In = N->getOperand(i); 13598 13599 if (In.isUndef()) 13600 Opnds.push_back(DAG.getUNDEF(SrcVT)); 13601 else 13602 Opnds.push_back(In.getOperand(0)); 13603 } 13604 SDValue BV = DAG.getBuildVector(NVT, DL, Opnds); 13605 AddToWorklist(BV.getNode()); 13606 13607 return DAG.getNode(Opcode, DL, VT, BV); 13608 } 13609 13610 SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N, 13611 ArrayRef<int> VectorMask, 13612 SDValue VecIn1, SDValue VecIn2, 13613 unsigned LeftIdx) { 13614 MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout()); 13615 SDValue ZeroIdx = DAG.getConstant(0, DL, IdxTy); 13616 13617 EVT VT = N->getValueType(0); 13618 EVT InVT1 = VecIn1.getValueType(); 13619 EVT InVT2 = VecIn2.getNode() ? VecIn2.getValueType() : InVT1; 13620 13621 unsigned Vec2Offset = InVT1.getVectorNumElements(); 13622 unsigned NumElems = VT.getVectorNumElements(); 13623 unsigned ShuffleNumElems = NumElems; 13624 13625 // We can't generate a shuffle node with mismatched input and output types. 13626 // Try to make the types match the type of the output. 13627 if (InVT1 != VT || InVT2 != VT) { 13628 if ((VT.getSizeInBits() % InVT1.getSizeInBits() == 0) && InVT1 == InVT2) { 13629 // If the output vector length is a multiple of both input lengths, 13630 // we can concatenate them and pad the rest with undefs. 13631 unsigned NumConcats = VT.getSizeInBits() / InVT1.getSizeInBits(); 13632 assert(NumConcats >= 2 && "Concat needs at least two inputs!"); 13633 SmallVector<SDValue, 2> ConcatOps(NumConcats, DAG.getUNDEF(InVT1)); 13634 ConcatOps[0] = VecIn1; 13635 ConcatOps[1] = VecIn2 ? VecIn2 : DAG.getUNDEF(InVT1); 13636 VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps); 13637 VecIn2 = SDValue(); 13638 } else if (InVT1.getSizeInBits() == VT.getSizeInBits() * 2) { 13639 if (!TLI.isExtractSubvectorCheap(VT, NumElems)) 13640 return SDValue(); 13641 13642 if (!VecIn2.getNode()) { 13643 // If we only have one input vector, and it's twice the size of the 13644 // output, split it in two. 13645 VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1, 13646 DAG.getConstant(NumElems, DL, IdxTy)); 13647 VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, VecIn1, ZeroIdx); 13648 // Since we now have shorter input vectors, adjust the offset of the 13649 // second vector's start. 13650 Vec2Offset = NumElems; 13651 } else if (InVT2.getSizeInBits() <= InVT1.getSizeInBits()) { 13652 // VecIn1 is wider than the output, and we have another, possibly 13653 // smaller input. Pad the smaller input with undefs, shuffle at the 13654 // input vector width, and extract the output. 13655 // The shuffle type is different than VT, so check legality again. 13656 if (LegalOperations && 13657 !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, InVT1)) 13658 return SDValue(); 13659 13660 // Legalizing INSERT_SUBVECTOR is tricky - you basically have to 13661 // lower it back into a BUILD_VECTOR. So if the inserted type is 13662 // illegal, don't even try. 13663 if (InVT1 != InVT2) { 13664 if (!TLI.isTypeLegal(InVT2)) 13665 return SDValue(); 13666 VecIn2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT1, 13667 DAG.getUNDEF(InVT1), VecIn2, ZeroIdx); 13668 } 13669 ShuffleNumElems = NumElems * 2; 13670 } else { 13671 // Both VecIn1 and VecIn2 are wider than the output, and VecIn2 is wider 13672 // than VecIn1. 
We can't handle this for now - this case will disappear 13673 // when we start sorting the vectors by type. 13674 return SDValue(); 13675 } 13676 } else { 13677 // TODO: Support cases where the length mismatch isn't exactly by a 13678 // factor of 2. 13679 // TODO: Move this check upwards, so that if we have bad type 13680 // mismatches, we don't create any DAG nodes. 13681 return SDValue(); 13682 } 13683 } 13684 13685 // Initialize mask to undef. 13686 SmallVector<int, 8> Mask(ShuffleNumElems, -1); 13687 13688 // Only need to run up to the number of elements actually used, not the 13689 // total number of elements in the shuffle - if we are shuffling a wider 13690 // vector, the high lanes should be set to undef. 13691 for (unsigned i = 0; i != NumElems; ++i) { 13692 if (VectorMask[i] <= 0) 13693 continue; 13694 13695 unsigned ExtIndex = N->getOperand(i).getConstantOperandVal(1); 13696 if (VectorMask[i] == (int)LeftIdx) { 13697 Mask[i] = ExtIndex; 13698 } else if (VectorMask[i] == (int)LeftIdx + 1) { 13699 Mask[i] = Vec2Offset + ExtIndex; 13700 } 13701 } 13702 13703 // The type the input vectors may have changed above. 13704 InVT1 = VecIn1.getValueType(); 13705 13706 // If we already have a VecIn2, it should have the same type as VecIn1. 13707 // If we don't, get an undef/zero vector of the appropriate type. 13708 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(InVT1); 13709 assert(InVT1 == VecIn2.getValueType() && "Unexpected second input type."); 13710 13711 SDValue Shuffle = DAG.getVectorShuffle(InVT1, DL, VecIn1, VecIn2, Mask); 13712 if (ShuffleNumElems > NumElems) 13713 Shuffle = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuffle, ZeroIdx); 13714 13715 return Shuffle; 13716 } 13717 13718 // Check to see if this is a BUILD_VECTOR of a bunch of EXTRACT_VECTOR_ELT 13719 // operations. If the types of the vectors we're extracting from allow it, 13720 // turn this into a vector_shuffle node. 13721 SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) { 13722 SDLoc DL(N); 13723 EVT VT = N->getValueType(0); 13724 13725 // Only type-legal BUILD_VECTOR nodes are converted to shuffle nodes. 13726 if (!isTypeLegal(VT)) 13727 return SDValue(); 13728 13729 // May only combine to shuffle after legalize if shuffle is legal. 13730 if (LegalOperations && !TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, VT)) 13731 return SDValue(); 13732 13733 bool UsesZeroVector = false; 13734 unsigned NumElems = N->getNumOperands(); 13735 13736 // Record, for each element of the newly built vector, which input vector 13737 // that element comes from. -1 stands for undef, 0 for the zero vector, 13738 // and positive values for the input vectors. 13739 // VectorMask maps each element to its vector number, and VecIn maps vector 13740 // numbers to their initial SDValues. 13741 13742 SmallVector<int, 8> VectorMask(NumElems, -1); 13743 SmallVector<SDValue, 8> VecIn; 13744 VecIn.push_back(SDValue()); 13745 13746 for (unsigned i = 0; i != NumElems; ++i) { 13747 SDValue Op = N->getOperand(i); 13748 13749 if (Op.isUndef()) 13750 continue; 13751 13752 // See if we can use a blend with a zero vector. 13753 // TODO: Should we generalize this to a blend with an arbitrary constant 13754 // vector? 13755 if (isNullConstant(Op) || isNullFPConstant(Op)) { 13756 UsesZeroVector = true; 13757 VectorMask[i] = 0; 13758 continue; 13759 } 13760 13761 // Not an undef or zero. If the input is something other than an 13762 // EXTRACT_VECTOR_ELT with a constant index, bail out. 
13763 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 13764 !isa<ConstantSDNode>(Op.getOperand(1))) 13765 return SDValue(); 13766 13767 SDValue ExtractedFromVec = Op.getOperand(0); 13768 13769 // All inputs must have the same element type as the output. 13770 if (VT.getVectorElementType() != 13771 ExtractedFromVec.getValueType().getVectorElementType()) 13772 return SDValue(); 13773 13774 // Have we seen this input vector before? 13775 // The vectors are expected to be tiny (usually 1 or 2 elements), so using 13776 // a map back from SDValues to numbers isn't worth it. 13777 unsigned Idx = std::distance( 13778 VecIn.begin(), std::find(VecIn.begin(), VecIn.end(), ExtractedFromVec)); 13779 if (Idx == VecIn.size()) 13780 VecIn.push_back(ExtractedFromVec); 13781 13782 VectorMask[i] = Idx; 13783 } 13784 13785 // If we didn't find at least one input vector, bail out. 13786 if (VecIn.size() < 2) 13787 return SDValue(); 13788 13789 // TODO: We want to sort the vectors by descending length, so that adjacent 13790 // pairs have similar length, and the longer vector is always first in the 13791 // pair. 13792 13793 // TODO: Should this fire if some of the input vectors has illegal type (like 13794 // it does now), or should we let legalization run its course first? 13795 13796 // Shuffle phase: 13797 // Take pairs of vectors, and shuffle them so that the result has elements 13798 // from these vectors in the correct places. 13799 // For example, given: 13800 // t10: i32 = extract_vector_elt t1, Constant:i64<0> 13801 // t11: i32 = extract_vector_elt t2, Constant:i64<0> 13802 // t12: i32 = extract_vector_elt t3, Constant:i64<0> 13803 // t13: i32 = extract_vector_elt t1, Constant:i64<1> 13804 // t14: v4i32 = BUILD_VECTOR t10, t11, t12, t13 13805 // We will generate: 13806 // t20: v4i32 = vector_shuffle<0,4,u,1> t1, t2 13807 // t21: v4i32 = vector_shuffle<u,u,0,u> t3, undef 13808 SmallVector<SDValue, 4> Shuffles; 13809 for (unsigned In = 0, Len = (VecIn.size() / 2); In < Len; ++In) { 13810 unsigned LeftIdx = 2 * In + 1; 13811 SDValue VecLeft = VecIn[LeftIdx]; 13812 SDValue VecRight = 13813 (LeftIdx + 1) < VecIn.size() ? VecIn[LeftIdx + 1] : SDValue(); 13814 13815 if (SDValue Shuffle = createBuildVecShuffle(DL, N, VectorMask, VecLeft, 13816 VecRight, LeftIdx)) 13817 Shuffles.push_back(Shuffle); 13818 else 13819 return SDValue(); 13820 } 13821 13822 // If we need the zero vector as an "ingredient" in the blend tree, add it 13823 // to the list of shuffles. 13824 if (UsesZeroVector) 13825 Shuffles.push_back(VT.isInteger() ? DAG.getConstant(0, DL, VT) 13826 : DAG.getConstantFP(0.0, DL, VT)); 13827 13828 // If we only have one shuffle, we're done. 13829 if (Shuffles.size() == 1) 13830 return Shuffles[0]; 13831 13832 // Update the vector mask to point to the post-shuffle vectors. 13833 for (int &Vec : VectorMask) 13834 if (Vec == 0) 13835 Vec = Shuffles.size() - 1; 13836 else 13837 Vec = (Vec - 1) / 2; 13838 13839 // More than one shuffle. Generate a binary tree of blends, e.g. 
if from 13840 // the previous step we got the set of shuffles t10, t11, t12, t13, we will 13841 // generate: 13842 // t10: v8i32 = vector_shuffle<0,8,u,u,u,u,u,u> t1, t2 13843 // t11: v8i32 = vector_shuffle<u,u,0,8,u,u,u,u> t3, t4 13844 // t12: v8i32 = vector_shuffle<u,u,u,u,0,8,u,u> t5, t6 13845 // t13: v8i32 = vector_shuffle<u,u,u,u,u,u,0,8> t7, t8 13846 // t20: v8i32 = vector_shuffle<0,1,10,11,u,u,u,u> t10, t11 13847 // t21: v8i32 = vector_shuffle<u,u,u,u,4,5,14,15> t12, t13 13848 // t30: v8i32 = vector_shuffle<0,1,2,3,12,13,14,15> t20, t21 13849 13850 // Make sure the initial size of the shuffle list is even. 13851 if (Shuffles.size() % 2) 13852 Shuffles.push_back(DAG.getUNDEF(VT)); 13853 13854 for (unsigned CurSize = Shuffles.size(); CurSize > 1; CurSize /= 2) { 13855 if (CurSize % 2) { 13856 Shuffles[CurSize] = DAG.getUNDEF(VT); 13857 CurSize++; 13858 } 13859 for (unsigned In = 0, Len = CurSize / 2; In < Len; ++In) { 13860 int Left = 2 * In; 13861 int Right = 2 * In + 1; 13862 SmallVector<int, 8> Mask(NumElems, -1); 13863 for (unsigned i = 0; i != NumElems; ++i) { 13864 if (VectorMask[i] == Left) { 13865 Mask[i] = i; 13866 VectorMask[i] = In; 13867 } else if (VectorMask[i] == Right) { 13868 Mask[i] = i + NumElems; 13869 VectorMask[i] = In; 13870 } 13871 } 13872 13873 Shuffles[In] = 13874 DAG.getVectorShuffle(VT, DL, Shuffles[Left], Shuffles[Right], Mask); 13875 } 13876 } 13877 13878 return Shuffles[0]; 13879 } 13880 13881 SDValue DAGCombiner::visitBUILD_VECTOR(SDNode *N) { 13882 EVT VT = N->getValueType(0); 13883 13884 // A vector built entirely of undefs is undef. 13885 if (ISD::allOperandsUndef(N)) 13886 return DAG.getUNDEF(VT); 13887 13888 // Check if we can express BUILD VECTOR via subvector extract. 13889 if (!LegalTypes && (N->getNumOperands() > 1)) { 13890 SDValue Op0 = N->getOperand(0); 13891 auto checkElem = [&](SDValue Op) -> uint64_t { 13892 if ((Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) && 13893 (Op0.getOperand(0) == Op.getOperand(0))) 13894 if (auto CNode = dyn_cast<ConstantSDNode>(Op.getOperand(1))) 13895 return CNode->getZExtValue(); 13896 return -1; 13897 }; 13898 13899 int Offset = checkElem(Op0); 13900 for (unsigned i = 0; i < N->getNumOperands(); ++i) { 13901 if (Offset + i != checkElem(N->getOperand(i))) { 13902 Offset = -1; 13903 break; 13904 } 13905 } 13906 13907 if ((Offset == 0) && 13908 (Op0.getOperand(0).getValueType() == N->getValueType(0))) 13909 return Op0.getOperand(0); 13910 if ((Offset != -1) && 13911 ((Offset % N->getValueType(0).getVectorNumElements()) == 13912 0)) // IDX must be multiple of output size. 13913 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), N->getValueType(0), 13914 Op0.getOperand(0), Op0.getOperand(1)); 13915 } 13916 13917 if (SDValue V = reduceBuildVecExtToExtBuildVec(N)) 13918 return V; 13919 13920 if (SDValue V = reduceBuildVecConvertToConvertBuildVec(N)) 13921 return V; 13922 13923 if (SDValue V = reduceBuildVecToShuffle(N)) 13924 return V; 13925 13926 return SDValue(); 13927 } 13928 13929 static SDValue combineConcatVectorOfScalars(SDNode *N, SelectionDAG &DAG) { 13930 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 13931 EVT OpVT = N->getOperand(0).getValueType(); 13932 13933 // If the operands are legal vectors, leave them alone. 
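  // For illustration (values are placeholders): if OpVT is an illegal vector
  // type such as v1i64, a node like
  //   (v2i64 concat_vectors (v1i64 bitcast (i64 a)), (v1i64 bitcast (i64 b)))
  // can be rebuilt from the underlying scalars as
  //   (v2i64 build_vector a, b)
  // which is roughly what the code below forms.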
13934 if (TLI.isTypeLegal(OpVT)) 13935 return SDValue(); 13936 13937 SDLoc DL(N); 13938 EVT VT = N->getValueType(0); 13939 SmallVector<SDValue, 8> Ops; 13940 13941 EVT SVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits()); 13942 SDValue ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT); 13943 13944 // Keep track of what we encounter. 13945 bool AnyInteger = false; 13946 bool AnyFP = false; 13947 for (const SDValue &Op : N->ops()) { 13948 if (ISD::BITCAST == Op.getOpcode() && 13949 !Op.getOperand(0).getValueType().isVector()) 13950 Ops.push_back(Op.getOperand(0)); 13951 else if (ISD::UNDEF == Op.getOpcode()) 13952 Ops.push_back(ScalarUndef); 13953 else 13954 return SDValue(); 13955 13956 // Note whether we encounter an integer or floating point scalar. 13957 // If it's neither, bail out, it could be something weird like x86mmx. 13958 EVT LastOpVT = Ops.back().getValueType(); 13959 if (LastOpVT.isFloatingPoint()) 13960 AnyFP = true; 13961 else if (LastOpVT.isInteger()) 13962 AnyInteger = true; 13963 else 13964 return SDValue(); 13965 } 13966 13967 // If any of the operands is a floating point scalar bitcast to a vector, 13968 // use floating point types throughout, and bitcast everything. 13969 // Replace UNDEFs by another scalar UNDEF node, of the final desired type. 13970 if (AnyFP) { 13971 SVT = EVT::getFloatingPointVT(OpVT.getSizeInBits()); 13972 ScalarUndef = DAG.getNode(ISD::UNDEF, DL, SVT); 13973 if (AnyInteger) { 13974 for (SDValue &Op : Ops) { 13975 if (Op.getValueType() == SVT) 13976 continue; 13977 if (Op.isUndef()) 13978 Op = ScalarUndef; 13979 else 13980 Op = DAG.getBitcast(SVT, Op); 13981 } 13982 } 13983 } 13984 13985 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), SVT, 13986 VT.getSizeInBits() / SVT.getSizeInBits()); 13987 return DAG.getBitcast(VT, DAG.getBuildVector(VecVT, DL, Ops)); 13988 } 13989 13990 // Check to see if this is a CONCAT_VECTORS of a bunch of EXTRACT_SUBVECTOR 13991 // operations. If so, and if the EXTRACT_SUBVECTOR vector inputs come from at 13992 // most two distinct vectors the same size as the result, attempt to turn this 13993 // into a legal shuffle. 13994 static SDValue combineConcatVectorOfExtracts(SDNode *N, SelectionDAG &DAG) { 13995 EVT VT = N->getValueType(0); 13996 EVT OpVT = N->getOperand(0).getValueType(); 13997 int NumElts = VT.getVectorNumElements(); 13998 int NumOpElts = OpVT.getVectorNumElements(); 13999 14000 SDValue SV0 = DAG.getUNDEF(VT), SV1 = DAG.getUNDEF(VT); 14001 SmallVector<int, 8> Mask; 14002 14003 for (SDValue Op : N->ops()) { 14004 // Peek through any bitcast. 14005 while (Op.getOpcode() == ISD::BITCAST) 14006 Op = Op.getOperand(0); 14007 14008 // UNDEF nodes convert to UNDEF shuffle mask values. 14009 if (Op.isUndef()) { 14010 Mask.append((unsigned)NumOpElts, -1); 14011 continue; 14012 } 14013 14014 if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR) 14015 return SDValue(); 14016 14017 // What vector are we extracting the subvector from and at what index? 14018 SDValue ExtVec = Op.getOperand(0); 14019 14020 // We want the EVT of the original extraction to correctly scale the 14021 // extraction index. 14022 EVT ExtVT = ExtVec.getValueType(); 14023 14024 // Peek through any bitcast. 14025 while (ExtVec.getOpcode() == ISD::BITCAST) 14026 ExtVec = ExtVec.getOperand(0); 14027 14028 // UNDEF nodes convert to UNDEF shuffle mask values. 
14029 if (ExtVec.isUndef()) { 14030 Mask.append((unsigned)NumOpElts, -1); 14031 continue; 14032 } 14033 14034 if (!isa<ConstantSDNode>(Op.getOperand(1))) 14035 return SDValue(); 14036 int ExtIdx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); 14037 14038 // Ensure that we are extracting a subvector from a vector the same 14039 // size as the result. 14040 if (ExtVT.getSizeInBits() != VT.getSizeInBits()) 14041 return SDValue(); 14042 14043 // Scale the subvector index to account for any bitcast. 14044 int NumExtElts = ExtVT.getVectorNumElements(); 14045 if (0 == (NumExtElts % NumElts)) 14046 ExtIdx /= (NumExtElts / NumElts); 14047 else if (0 == (NumElts % NumExtElts)) 14048 ExtIdx *= (NumElts / NumExtElts); 14049 else 14050 return SDValue(); 14051 14052 // At most we can reference 2 inputs in the final shuffle. 14053 if (SV0.isUndef() || SV0 == ExtVec) { 14054 SV0 = ExtVec; 14055 for (int i = 0; i != NumOpElts; ++i) 14056 Mask.push_back(i + ExtIdx); 14057 } else if (SV1.isUndef() || SV1 == ExtVec) { 14058 SV1 = ExtVec; 14059 for (int i = 0; i != NumOpElts; ++i) 14060 Mask.push_back(i + ExtIdx + NumElts); 14061 } else { 14062 return SDValue(); 14063 } 14064 } 14065 14066 if (!DAG.getTargetLoweringInfo().isShuffleMaskLegal(Mask, VT)) 14067 return SDValue(); 14068 14069 return DAG.getVectorShuffle(VT, SDLoc(N), DAG.getBitcast(VT, SV0), 14070 DAG.getBitcast(VT, SV1), Mask); 14071 } 14072 14073 SDValue DAGCombiner::visitCONCAT_VECTORS(SDNode *N) { 14074 // If we only have one input vector, we don't need to do any concatenation. 14075 if (N->getNumOperands() == 1) 14076 return N->getOperand(0); 14077 14078 // Check if all of the operands are undefs. 14079 EVT VT = N->getValueType(0); 14080 if (ISD::allOperandsUndef(N)) 14081 return DAG.getUNDEF(VT); 14082 14083 // Optimize concat_vectors where all but the first of the vectors are undef. 14084 if (std::all_of(std::next(N->op_begin()), N->op_end(), [](const SDValue &Op) { 14085 return Op.isUndef(); 14086 })) { 14087 SDValue In = N->getOperand(0); 14088 assert(In.getValueType().isVector() && "Must concat vectors"); 14089 14090 // Transform: concat_vectors(scalar, undef) -> scalar_to_vector(sclr). 14091 if (In->getOpcode() == ISD::BITCAST && 14092 !In->getOperand(0)->getValueType(0).isVector()) { 14093 SDValue Scalar = In->getOperand(0); 14094 14095 // If the bitcast type isn't legal, it might be a trunc of a legal type; 14096 // look through the trunc so we can still do the transform: 14097 // concat_vectors(trunc(scalar), undef) -> scalar_to_vector(scalar) 14098 if (Scalar->getOpcode() == ISD::TRUNCATE && 14099 !TLI.isTypeLegal(Scalar.getValueType()) && 14100 TLI.isTypeLegal(Scalar->getOperand(0).getValueType())) 14101 Scalar = Scalar->getOperand(0); 14102 14103 EVT SclTy = Scalar->getValueType(0); 14104 14105 if (!SclTy.isFloatingPoint() && !SclTy.isInteger()) 14106 return SDValue(); 14107 14108 unsigned VNTNumElms = VT.getSizeInBits() / SclTy.getSizeInBits(); 14109 if (VNTNumElms < 2) 14110 return SDValue(); 14111 14112 EVT NVT = EVT::getVectorVT(*DAG.getContext(), SclTy, VNTNumElms); 14113 if (!TLI.isTypeLegal(NVT) || !TLI.isTypeLegal(Scalar.getValueType())) 14114 return SDValue(); 14115 14116 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), NVT, Scalar); 14117 return DAG.getBitcast(VT, Res); 14118 } 14119 } 14120 14121 // Fold any combination of BUILD_VECTOR or UNDEF nodes into one BUILD_VECTOR. 14122 // We have already tested above for an UNDEF only concatenation. 
  // fold (concat_vectors (BUILD_VECTOR A, B, ...), (BUILD_VECTOR C, D, ...))
  // -> (BUILD_VECTOR A, B, ..., C, D, ...)
  auto IsBuildVectorOrUndef = [](const SDValue &Op) {
    return ISD::UNDEF == Op.getOpcode() || ISD::BUILD_VECTOR == Op.getOpcode();
  };
  if (llvm::all_of(N->ops(), IsBuildVectorOrUndef)) {
    SmallVector<SDValue, 8> Opnds;
    EVT SVT = VT.getScalarType();

    EVT MinVT = SVT;
    if (!SVT.isFloatingPoint()) {
      // If the BUILD_VECTORs are built from integers, their operands may have
      // different types. Get the smallest type and truncate all operands to it.
      bool FoundMinVT = false;
      for (const SDValue &Op : N->ops())
        if (ISD::BUILD_VECTOR == Op.getOpcode()) {
          EVT OpSVT = Op.getOperand(0)->getValueType(0);
          MinVT = (!FoundMinVT || OpSVT.bitsLE(MinVT)) ? OpSVT : MinVT;
          FoundMinVT = true;
        }
      assert(FoundMinVT && "Concat vector type mismatch");
    }

    for (const SDValue &Op : N->ops()) {
      EVT OpVT = Op.getValueType();
      unsigned NumElts = OpVT.getVectorNumElements();

      if (ISD::UNDEF == Op.getOpcode())
        Opnds.append(NumElts, DAG.getUNDEF(MinVT));

      if (ISD::BUILD_VECTOR == Op.getOpcode()) {
        if (SVT.isFloatingPoint()) {
          assert(SVT == OpVT.getScalarType() && "Concat vector type mismatch");
          Opnds.append(Op->op_begin(), Op->op_begin() + NumElts);
        } else {
          for (unsigned i = 0; i != NumElts; ++i)
            Opnds.push_back(
                DAG.getNode(ISD::TRUNCATE, SDLoc(N), MinVT, Op.getOperand(i)));
        }
      }
    }

    assert(VT.getVectorNumElements() == Opnds.size() &&
           "Concat vector type mismatch");
    return DAG.getBuildVector(VT, SDLoc(N), Opnds);
  }

  // Fold CONCAT_VECTORS of only bitcast scalars (or undef) to BUILD_VECTOR.
  if (SDValue V = combineConcatVectorOfScalars(N, DAG))
    return V;

  // Fold CONCAT_VECTORS of EXTRACT_SUBVECTOR (or undef) to VECTOR_SHUFFLE.
  if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT))
    if (SDValue V = combineConcatVectorOfExtracts(N, DAG))
      return V;

  // Type legalization of vectors and DAG canonicalization of SHUFFLE_VECTOR
  // nodes often generate nop CONCAT_VECTOR nodes.
  // Scan the CONCAT_VECTORS operands and look for extract_subvector operations
  // that place the incoming vectors at the exact same location.
  SDValue SingleSource = SDValue();
  unsigned PartNumElem = N->getOperand(0).getValueType().getVectorNumElements();

  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    SDValue Op = N->getOperand(i);

    if (Op.isUndef())
      continue;

    // Check if this is the identity extract:
    if (Op.getOpcode() != ISD::EXTRACT_SUBVECTOR)
      return SDValue();

    // Find the single incoming vector for the extract_subvector.
    if (SingleSource.getNode()) {
      if (Op.getOperand(0) != SingleSource)
        return SDValue();
    } else {
      SingleSource = Op.getOperand(0);

      // Check that the source type is the same as the type of the result.
      // If not, this concat may extend the vector, so we cannot
      // optimize it away.
      if (SingleSource.getValueType() != N->getValueType(0))
        return SDValue();
    }

    unsigned IdentityIndex = i * PartNumElem;
    ConstantSDNode *CS = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    // The extract index must be constant.
14213 if (!CS) 14214 return SDValue(); 14215 14216 // Check that we are reading from the identity index. 14217 if (CS->getZExtValue() != IdentityIndex) 14218 return SDValue(); 14219 } 14220 14221 if (SingleSource.getNode()) 14222 return SingleSource; 14223 14224 return SDValue(); 14225 } 14226 14227 SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) { 14228 EVT NVT = N->getValueType(0); 14229 SDValue V = N->getOperand(0); 14230 14231 // Extract from UNDEF is UNDEF. 14232 if (V.isUndef()) 14233 return DAG.getUNDEF(NVT); 14234 14235 // Combine: 14236 // (extract_subvec (concat V1, V2, ...), i) 14237 // Into: 14238 // Vi if possible 14239 // Only operand 0 is checked as 'concat' assumes all inputs of the same 14240 // type. 14241 if (V->getOpcode() == ISD::CONCAT_VECTORS && 14242 isa<ConstantSDNode>(N->getOperand(1)) && 14243 V->getOperand(0).getValueType() == NVT) { 14244 unsigned Idx = N->getConstantOperandVal(1); 14245 unsigned NumElems = NVT.getVectorNumElements(); 14246 assert((Idx % NumElems) == 0 && 14247 "IDX in concat is not a multiple of the result vector length."); 14248 return V->getOperand(Idx / NumElems); 14249 } 14250 14251 // Skip bitcasting 14252 if (V->getOpcode() == ISD::BITCAST) 14253 V = V.getOperand(0); 14254 14255 if (V->getOpcode() == ISD::INSERT_SUBVECTOR) { 14256 // Handle only simple case where vector being inserted and vector 14257 // being extracted are of same size. 14258 EVT SmallVT = V->getOperand(1).getValueType(); 14259 if (!NVT.bitsEq(SmallVT)) 14260 return SDValue(); 14261 14262 // Only handle cases where both indexes are constants. 14263 ConstantSDNode *ExtIdx = dyn_cast<ConstantSDNode>(N->getOperand(1)); 14264 ConstantSDNode *InsIdx = dyn_cast<ConstantSDNode>(V->getOperand(2)); 14265 14266 if (InsIdx && ExtIdx) { 14267 // Combine: 14268 // (extract_subvec (insert_subvec V1, V2, InsIdx), ExtIdx) 14269 // Into: 14270 // indices are equal or bit offsets are equal => V1 14271 // otherwise => (extract_subvec V1, ExtIdx) 14272 if (InsIdx->getZExtValue() * SmallVT.getScalarSizeInBits() == 14273 ExtIdx->getZExtValue() * NVT.getScalarSizeInBits()) 14274 return DAG.getBitcast(NVT, V->getOperand(1)); 14275 return DAG.getNode( 14276 ISD::EXTRACT_SUBVECTOR, SDLoc(N), NVT, 14277 DAG.getBitcast(N->getOperand(0).getValueType(), V->getOperand(0)), 14278 N->getOperand(1)); 14279 } 14280 } 14281 14282 return SDValue(); 14283 } 14284 14285 static SDValue simplifyShuffleOperandRecursively(SmallBitVector &UsedElements, 14286 SDValue V, SelectionDAG &DAG) { 14287 SDLoc DL(V); 14288 EVT VT = V.getValueType(); 14289 14290 switch (V.getOpcode()) { 14291 default: 14292 return V; 14293 14294 case ISD::CONCAT_VECTORS: { 14295 EVT OpVT = V->getOperand(0).getValueType(); 14296 int OpSize = OpVT.getVectorNumElements(); 14297 SmallBitVector OpUsedElements(OpSize, false); 14298 bool FoundSimplification = false; 14299 SmallVector<SDValue, 4> NewOps; 14300 NewOps.reserve(V->getNumOperands()); 14301 for (int i = 0, NumOps = V->getNumOperands(); i < NumOps; ++i) { 14302 SDValue Op = V->getOperand(i); 14303 bool OpUsed = false; 14304 for (int j = 0; j < OpSize; ++j) 14305 if (UsedElements[i * OpSize + j]) { 14306 OpUsedElements[j] = true; 14307 OpUsed = true; 14308 } 14309 NewOps.push_back( 14310 OpUsed ? 
simplifyShuffleOperandRecursively(OpUsedElements, Op, DAG) 14311 : DAG.getUNDEF(OpVT)); 14312 FoundSimplification |= Op == NewOps.back(); 14313 OpUsedElements.reset(); 14314 } 14315 if (FoundSimplification) 14316 V = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, NewOps); 14317 return V; 14318 } 14319 14320 case ISD::INSERT_SUBVECTOR: { 14321 SDValue BaseV = V->getOperand(0); 14322 SDValue SubV = V->getOperand(1); 14323 auto *IdxN = dyn_cast<ConstantSDNode>(V->getOperand(2)); 14324 if (!IdxN) 14325 return V; 14326 14327 int SubSize = SubV.getValueType().getVectorNumElements(); 14328 int Idx = IdxN->getZExtValue(); 14329 bool SubVectorUsed = false; 14330 SmallBitVector SubUsedElements(SubSize, false); 14331 for (int i = 0; i < SubSize; ++i) 14332 if (UsedElements[i + Idx]) { 14333 SubVectorUsed = true; 14334 SubUsedElements[i] = true; 14335 UsedElements[i + Idx] = false; 14336 } 14337 14338 // Now recurse on both the base and sub vectors. 14339 SDValue SimplifiedSubV = 14340 SubVectorUsed 14341 ? simplifyShuffleOperandRecursively(SubUsedElements, SubV, DAG) 14342 : DAG.getUNDEF(SubV.getValueType()); 14343 SDValue SimplifiedBaseV = simplifyShuffleOperandRecursively(UsedElements, BaseV, DAG); 14344 if (SimplifiedSubV != SubV || SimplifiedBaseV != BaseV) 14345 V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, 14346 SimplifiedBaseV, SimplifiedSubV, V->getOperand(2)); 14347 return V; 14348 } 14349 } 14350 } 14351 14352 static SDValue simplifyShuffleOperands(ShuffleVectorSDNode *SVN, SDValue N0, 14353 SDValue N1, SelectionDAG &DAG) { 14354 EVT VT = SVN->getValueType(0); 14355 int NumElts = VT.getVectorNumElements(); 14356 SmallBitVector N0UsedElements(NumElts, false), N1UsedElements(NumElts, false); 14357 for (int M : SVN->getMask()) 14358 if (M >= 0 && M < NumElts) 14359 N0UsedElements[M] = true; 14360 else if (M >= NumElts) 14361 N1UsedElements[M - NumElts] = true; 14362 14363 SDValue S0 = simplifyShuffleOperandRecursively(N0UsedElements, N0, DAG); 14364 SDValue S1 = simplifyShuffleOperandRecursively(N1UsedElements, N1, DAG); 14365 if (S0 == N0 && S1 == N1) 14366 return SDValue(); 14367 14368 return DAG.getVectorShuffle(VT, SDLoc(SVN), S0, S1, SVN->getMask()); 14369 } 14370 14371 // Tries to turn a shuffle of two CONCAT_VECTORS into a single concat, 14372 // or turn a shuffle of a single concat into simpler shuffle then concat. 14373 static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) { 14374 EVT VT = N->getValueType(0); 14375 unsigned NumElts = VT.getVectorNumElements(); 14376 14377 SDValue N0 = N->getOperand(0); 14378 SDValue N1 = N->getOperand(1); 14379 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 14380 14381 SmallVector<SDValue, 4> Ops; 14382 EVT ConcatVT = N0.getOperand(0).getValueType(); 14383 unsigned NumElemsPerConcat = ConcatVT.getVectorNumElements(); 14384 unsigned NumConcats = NumElts / NumElemsPerConcat; 14385 14386 // Special case: shuffle(concat(A,B)) can be more efficiently represented 14387 // as concat(shuffle(A,B),UNDEF) if the shuffle doesn't set any of the high 14388 // half vector elements. 
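  // For example (illustrative, A and B are placeholder v4i32 values): a v8i32
  // shuffle<1,4,0,5,u,u,u,u> of (concat_vectors A, B) and undef only defines
  // the low half of the result, so it can become
  //   (v8i32 concat_vectors (v4i32 vector_shuffle<1,4,0,5> A, B), undef)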
14389 if (NumElemsPerConcat * 2 == NumElts && N1.isUndef() && 14390 std::all_of(SVN->getMask().begin() + NumElemsPerConcat, 14391 SVN->getMask().end(), [](int i) { return i == -1; })) { 14392 N0 = DAG.getVectorShuffle(ConcatVT, SDLoc(N), N0.getOperand(0), N0.getOperand(1), 14393 makeArrayRef(SVN->getMask().begin(), NumElemsPerConcat)); 14394 N1 = DAG.getUNDEF(ConcatVT); 14395 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, N0, N1); 14396 } 14397 14398 // Look at every vector that's inserted. We're looking for exact 14399 // subvector-sized copies from a concatenated vector 14400 for (unsigned I = 0; I != NumConcats; ++I) { 14401 // Make sure we're dealing with a copy. 14402 unsigned Begin = I * NumElemsPerConcat; 14403 bool AllUndef = true, NoUndef = true; 14404 for (unsigned J = Begin; J != Begin + NumElemsPerConcat; ++J) { 14405 if (SVN->getMaskElt(J) >= 0) 14406 AllUndef = false; 14407 else 14408 NoUndef = false; 14409 } 14410 14411 if (NoUndef) { 14412 if (SVN->getMaskElt(Begin) % NumElemsPerConcat != 0) 14413 return SDValue(); 14414 14415 for (unsigned J = 1; J != NumElemsPerConcat; ++J) 14416 if (SVN->getMaskElt(Begin + J - 1) + 1 != SVN->getMaskElt(Begin + J)) 14417 return SDValue(); 14418 14419 unsigned FirstElt = SVN->getMaskElt(Begin) / NumElemsPerConcat; 14420 if (FirstElt < N0.getNumOperands()) 14421 Ops.push_back(N0.getOperand(FirstElt)); 14422 else 14423 Ops.push_back(N1.getOperand(FirstElt - N0.getNumOperands())); 14424 14425 } else if (AllUndef) { 14426 Ops.push_back(DAG.getUNDEF(N0.getOperand(0).getValueType())); 14427 } else { // Mixed with general masks and undefs, can't do optimization. 14428 return SDValue(); 14429 } 14430 } 14431 14432 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops); 14433 } 14434 14435 // Attempt to combine a shuffle of 2 inputs of 'scalar sources' - 14436 // BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR. 14437 // 14438 // SHUFFLE(BUILD_VECTOR(), BUILD_VECTOR()) -> BUILD_VECTOR() is always 14439 // a simplification in some sense, but it isn't appropriate in general: some 14440 // BUILD_VECTORs are substantially cheaper than others. The general case 14441 // of a BUILD_VECTOR requires inserting each element individually (or 14442 // performing the equivalent in a temporary stack variable). A BUILD_VECTOR of 14443 // all constants is a single constant pool load. A BUILD_VECTOR where each 14444 // element is identical is a splat. A BUILD_VECTOR where most of the operands 14445 // are undef lowers to a small number of element insertions. 14446 // 14447 // To deal with this, we currently use a bunch of mostly arbitrary heuristics. 14448 // We don't fold shuffles where one side is a non-zero constant, and we don't 14449 // fold shuffles if the resulting BUILD_VECTOR would have duplicate 14450 // non-constant operands. This seems to work out reasonably well in practice. 14451 static SDValue combineShuffleOfScalars(ShuffleVectorSDNode *SVN, 14452 SelectionDAG &DAG, 14453 const TargetLowering &TLI) { 14454 EVT VT = SVN->getValueType(0); 14455 unsigned NumElts = VT.getVectorNumElements(); 14456 SDValue N0 = SVN->getOperand(0); 14457 SDValue N1 = SVN->getOperand(1); 14458 14459 if (!N0->hasOneUse() || !N1->hasOneUse()) 14460 return SDValue(); 14461 // If only one of N1,N2 is constant, bail out if it is not ALL_ZEROS as 14462 // discussed above. 
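  // A small illustrative instance of the fold attempted below (a..h are
  // placeholder scalars):
  //   vector_shuffle<0,4,1,5> (build_vector a, b, c, d),
  //                           (build_vector e, f, g, h)
  //     -> build_vector a, e, b, f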
14463 if (!N1.isUndef()) { 14464 bool N0AnyConst = isAnyConstantBuildVector(N0.getNode()); 14465 bool N1AnyConst = isAnyConstantBuildVector(N1.getNode()); 14466 if (N0AnyConst && !N1AnyConst && !ISD::isBuildVectorAllZeros(N0.getNode())) 14467 return SDValue(); 14468 if (!N0AnyConst && N1AnyConst && !ISD::isBuildVectorAllZeros(N1.getNode())) 14469 return SDValue(); 14470 } 14471 14472 SmallVector<SDValue, 8> Ops; 14473 SmallSet<SDValue, 16> DuplicateOps; 14474 for (int M : SVN->getMask()) { 14475 SDValue Op = DAG.getUNDEF(VT.getScalarType()); 14476 if (M >= 0) { 14477 int Idx = M < (int)NumElts ? M : M - NumElts; 14478 SDValue &S = (M < (int)NumElts ? N0 : N1); 14479 if (S.getOpcode() == ISD::BUILD_VECTOR) { 14480 Op = S.getOperand(Idx); 14481 } else if (S.getOpcode() == ISD::SCALAR_TO_VECTOR) { 14482 if (Idx == 0) 14483 Op = S.getOperand(0); 14484 } else { 14485 // Operand can't be combined - bail out. 14486 return SDValue(); 14487 } 14488 } 14489 14490 // Don't duplicate a non-constant BUILD_VECTOR operand; semantically, this is 14491 // fine, but it's likely to generate low-quality code if the target can't 14492 // reconstruct an appropriate shuffle. 14493 if (!Op.isUndef() && !isa<ConstantSDNode>(Op) && !isa<ConstantFPSDNode>(Op)) 14494 if (!DuplicateOps.insert(Op).second) 14495 return SDValue(); 14496 14497 Ops.push_back(Op); 14498 } 14499 // BUILD_VECTOR requires all inputs to be of the same type, find the 14500 // maximum type and extend them all. 14501 EVT SVT = VT.getScalarType(); 14502 if (SVT.isInteger()) 14503 for (SDValue &Op : Ops) 14504 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); 14505 if (SVT != VT.getScalarType()) 14506 for (SDValue &Op : Ops) 14507 Op = TLI.isZExtFree(Op.getValueType(), SVT) 14508 ? DAG.getZExtOrTrunc(Op, SDLoc(SVN), SVT) 14509 : DAG.getSExtOrTrunc(Op, SDLoc(SVN), SVT); 14510 return DAG.getBuildVector(VT, SDLoc(SVN), Ops); 14511 } 14512 14513 // Match shuffles that can be converted to any_vector_extend_in_reg. 14514 // This is often generated during legalization. 14515 // e.g. v4i32 <0,u,1,u> -> (v2i64 any_vector_extend_in_reg(v4i32 src)) 14516 // TODO Add support for ZERO_EXTEND_VECTOR_INREG when we have a test case. 14517 SDValue combineShuffleToVectorExtend(ShuffleVectorSDNode *SVN, 14518 SelectionDAG &DAG, 14519 const TargetLowering &TLI, 14520 bool LegalOperations) { 14521 EVT VT = SVN->getValueType(0); 14522 bool IsBigEndian = DAG.getDataLayout().isBigEndian(); 14523 14524 // TODO Add support for big-endian when we have a test case. 14525 if (!VT.isInteger() || IsBigEndian) 14526 return SDValue(); 14527 14528 unsigned NumElts = VT.getVectorNumElements(); 14529 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 14530 ArrayRef<int> Mask = SVN->getMask(); 14531 SDValue N0 = SVN->getOperand(0); 14532 14533 // shuffle<0,-1,1,-1> == (v2i64 anyextend_vector_inreg(v4i32)) 14534 auto isAnyExtend = [&Mask, &NumElts](unsigned Scale) { 14535 for (unsigned i = 0; i != NumElts; ++i) { 14536 if (Mask[i] < 0) 14537 continue; 14538 if ((i % Scale) == 0 && Mask[i] == (int)(i / Scale)) 14539 continue; 14540 return false; 14541 } 14542 return true; 14543 }; 14544 14545 // Attempt to match a '*_extend_vector_inreg' shuffle, we just search for 14546 // power-of-2 extensions as they are the most likely. 
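  // For example (illustrative): with a v8i16 source, the mask
  // <0,u,1,u,2,u,3,u> matches Scale == 2, so we can form
  //   (v8i16 bitcast (v4i32 any_extend_vector_inreg (v8i16 src)))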
  for (unsigned Scale = 2; Scale < NumElts; Scale *= 2) {
    if (!isAnyExtend(Scale))
      continue;

    EVT OutSVT = EVT::getIntegerVT(*DAG.getContext(), EltSizeInBits * Scale);
    EVT OutVT = EVT::getVectorVT(*DAG.getContext(), OutSVT, NumElts / Scale);
    if (!LegalOperations ||
        TLI.isOperationLegalOrCustom(ISD::ANY_EXTEND_VECTOR_INREG, OutVT))
      return DAG.getBitcast(VT,
                            DAG.getAnyExtendVectorInReg(N0, SDLoc(SVN), OutVT));
  }

  return SDValue();
}

// Detect 'truncate_vector_inreg' style shuffles that pack the lower parts of
// each source element of a large type into the lowest elements of a smaller
// destination type. This is often generated during legalization.
// If the source node itself was a '*_extend_vector_inreg' node, we should then
// be able to remove it.
SDValue combineTruncationShuffle(ShuffleVectorSDNode *SVN, SelectionDAG &DAG) {
  EVT VT = SVN->getValueType(0);
  bool IsBigEndian = DAG.getDataLayout().isBigEndian();

  // TODO Add support for big-endian when we have a test case.
  if (!VT.isInteger() || IsBigEndian)
    return SDValue();

  SDValue N0 = SVN->getOperand(0);
  while (N0.getOpcode() == ISD::BITCAST)
    N0 = N0.getOperand(0);

  unsigned Opcode = N0.getOpcode();
  if (Opcode != ISD::ANY_EXTEND_VECTOR_INREG &&
      Opcode != ISD::SIGN_EXTEND_VECTOR_INREG &&
      Opcode != ISD::ZERO_EXTEND_VECTOR_INREG)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  ArrayRef<int> Mask = SVN->getMask();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned EltSizeInBits = VT.getScalarSizeInBits();
  unsigned ExtSrcSizeInBits = N00.getScalarValueSizeInBits();

  // (v4i32 truncate_vector_inreg(v2i64)) == shuffle<0,2,-1,-1>
  // (v8i16 truncate_vector_inreg(v4i32)) == shuffle<0,2,4,6,-1,-1,-1,-1>
  // (v8i16 truncate_vector_inreg(v2i64)) == shuffle<0,4,-1,-1,-1,-1,-1,-1>
  auto isTruncate = [&Mask, &NumElts](unsigned Scale) {
    for (unsigned i = 0; i != NumElts; ++i) {
      if (Mask[i] < 0)
        continue;
      if ((i * Scale) < NumElts && Mask[i] == (int)(i * Scale))
        continue;
      return false;
    }
    return true;
  };

  // At the moment we just handle the case where we've truncated back to the
  // same size as before the extension.
  // TODO: handle more extension/truncation cases as cases arise.
  if (EltSizeInBits != ExtSrcSizeInBits)
    return SDValue();

  // Attempt to match a 'truncate_vector_inreg' shuffle; we just search for
  // power-of-2 truncations as they are the most likely.
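  // For example (illustrative): a v4i32 shuffle<0,2,u,u> whose input peeks
  // through bitcasts to (v2i64 zero_extend_vector_inreg (v4i32 t)) matches
  // Scale == 2, and simply yields t bitcast back to v4i32.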
14613 for (unsigned Scale = 2; Scale < NumElts; Scale *= 2) 14614 if (isTruncate(Scale)) 14615 return DAG.getBitcast(VT, N00); 14616 14617 return SDValue(); 14618 } 14619 14620 SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) { 14621 EVT VT = N->getValueType(0); 14622 unsigned NumElts = VT.getVectorNumElements(); 14623 14624 SDValue N0 = N->getOperand(0); 14625 SDValue N1 = N->getOperand(1); 14626 14627 assert(N0.getValueType() == VT && "Vector shuffle must be normalized in DAG"); 14628 14629 // Canonicalize shuffle undef, undef -> undef 14630 if (N0.isUndef() && N1.isUndef()) 14631 return DAG.getUNDEF(VT); 14632 14633 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 14634 14635 // Canonicalize shuffle v, v -> v, undef 14636 if (N0 == N1) { 14637 SmallVector<int, 8> NewMask; 14638 for (unsigned i = 0; i != NumElts; ++i) { 14639 int Idx = SVN->getMaskElt(i); 14640 if (Idx >= (int)NumElts) Idx -= NumElts; 14641 NewMask.push_back(Idx); 14642 } 14643 return DAG.getVectorShuffle(VT, SDLoc(N), N0, DAG.getUNDEF(VT), NewMask); 14644 } 14645 14646 // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask. 14647 if (N0.isUndef()) 14648 return DAG.getCommutedVectorShuffle(*SVN); 14649 14650 // Remove references to rhs if it is undef 14651 if (N1.isUndef()) { 14652 bool Changed = false; 14653 SmallVector<int, 8> NewMask; 14654 for (unsigned i = 0; i != NumElts; ++i) { 14655 int Idx = SVN->getMaskElt(i); 14656 if (Idx >= (int)NumElts) { 14657 Idx = -1; 14658 Changed = true; 14659 } 14660 NewMask.push_back(Idx); 14661 } 14662 if (Changed) 14663 return DAG.getVectorShuffle(VT, SDLoc(N), N0, N1, NewMask); 14664 } 14665 14666 // If it is a splat, check if the argument vector is another splat or a 14667 // build_vector. 14668 if (SVN->isSplat() && SVN->getSplatIndex() < (int)NumElts) { 14669 SDNode *V = N0.getNode(); 14670 14671 // If this is a bit convert that changes the element type of the vector but 14672 // not the number of vector elements, look through it. Be careful not to 14673 // look though conversions that change things like v4f32 to v2f64. 14674 if (V->getOpcode() == ISD::BITCAST) { 14675 SDValue ConvInput = V->getOperand(0); 14676 if (ConvInput.getValueType().isVector() && 14677 ConvInput.getValueType().getVectorNumElements() == NumElts) 14678 V = ConvInput.getNode(); 14679 } 14680 14681 if (V->getOpcode() == ISD::BUILD_VECTOR) { 14682 assert(V->getNumOperands() == NumElts && 14683 "BUILD_VECTOR has wrong number of operands"); 14684 SDValue Base; 14685 bool AllSame = true; 14686 for (unsigned i = 0; i != NumElts; ++i) { 14687 if (!V->getOperand(i).isUndef()) { 14688 Base = V->getOperand(i); 14689 break; 14690 } 14691 } 14692 // Splat of <u, u, u, u>, return <u, u, u, u> 14693 if (!Base.getNode()) 14694 return N0; 14695 for (unsigned i = 0; i != NumElts; ++i) { 14696 if (V->getOperand(i) != Base) { 14697 AllSame = false; 14698 break; 14699 } 14700 } 14701 // Splat of <x, x, x, x>, return <x, x, x, x> 14702 if (AllSame) 14703 return N0; 14704 14705 // Canonicalize any other splat as a build_vector. 14706 const SDValue &Splatted = V->getOperand(SVN->getSplatIndex()); 14707 SmallVector<SDValue, 8> Ops(NumElts, Splatted); 14708 SDValue NewBV = DAG.getBuildVector(V->getValueType(0), SDLoc(N), Ops); 14709 14710 // We may have jumped through bitcasts, so the type of the 14711 // BUILD_VECTOR may not match the type of the shuffle. 
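      // For example (illustrative): if the splatted BUILD_VECTOR was reached
      // through a v4i32 <- v4f32 bitcast, NewBV above is v4f32 and must be
      // bitcast back to the shuffle's v4i32 type here.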
14712 if (V->getValueType(0) != VT) 14713 NewBV = DAG.getBitcast(VT, NewBV); 14714 return NewBV; 14715 } 14716 } 14717 14718 // There are various patterns used to build up a vector from smaller vectors, 14719 // subvectors, or elements. Scan chains of these and replace unused insertions 14720 // or components with undef. 14721 if (SDValue S = simplifyShuffleOperands(SVN, N0, N1, DAG)) 14722 return S; 14723 14724 // Match shuffles that can be converted to any_vector_extend_in_reg. 14725 if (SDValue V = combineShuffleToVectorExtend(SVN, DAG, TLI, LegalOperations)) 14726 return V; 14727 14728 // Combine "truncate_vector_in_reg" style shuffles. 14729 if (SDValue V = combineTruncationShuffle(SVN, DAG)) 14730 return V; 14731 14732 if (N0.getOpcode() == ISD::CONCAT_VECTORS && 14733 Level < AfterLegalizeVectorOps && 14734 (N1.isUndef() || 14735 (N1.getOpcode() == ISD::CONCAT_VECTORS && 14736 N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) { 14737 if (SDValue V = partitionShuffleOfConcats(N, DAG)) 14738 return V; 14739 } 14740 14741 // Attempt to combine a shuffle of 2 inputs of 'scalar sources' - 14742 // BUILD_VECTOR or SCALAR_TO_VECTOR into a single BUILD_VECTOR. 14743 if (Level < AfterLegalizeVectorOps && TLI.isTypeLegal(VT)) 14744 if (SDValue Res = combineShuffleOfScalars(SVN, DAG, TLI)) 14745 return Res; 14746 14747 // If this shuffle only has a single input that is a bitcasted shuffle, 14748 // attempt to merge the 2 shuffles and suitably bitcast the inputs/output 14749 // back to their original types. 14750 if (N0.getOpcode() == ISD::BITCAST && N0.hasOneUse() && 14751 N1.isUndef() && Level < AfterLegalizeVectorOps && 14752 TLI.isTypeLegal(VT)) { 14753 14754 // Peek through the bitcast only if there is one user. 14755 SDValue BC0 = N0; 14756 while (BC0.getOpcode() == ISD::BITCAST) { 14757 if (!BC0.hasOneUse()) 14758 break; 14759 BC0 = BC0.getOperand(0); 14760 } 14761 14762 auto ScaleShuffleMask = [](ArrayRef<int> Mask, int Scale) { 14763 if (Scale == 1) 14764 return SmallVector<int, 8>(Mask.begin(), Mask.end()); 14765 14766 SmallVector<int, 8> NewMask; 14767 for (int M : Mask) 14768 for (int s = 0; s != Scale; ++s) 14769 NewMask.push_back(M < 0 ? -1 : Scale * M + s); 14770 return NewMask; 14771 }; 14772 14773 if (BC0.getOpcode() == ISD::VECTOR_SHUFFLE && BC0.hasOneUse()) { 14774 EVT SVT = VT.getScalarType(); 14775 EVT InnerVT = BC0->getValueType(0); 14776 EVT InnerSVT = InnerVT.getScalarType(); 14777 14778 // Determine which shuffle works with the smaller scalar type. 14779 EVT ScaleVT = SVT.bitsLT(InnerSVT) ? VT : InnerVT; 14780 EVT ScaleSVT = ScaleVT.getScalarType(); 14781 14782 if (TLI.isTypeLegal(ScaleVT) && 14783 0 == (InnerSVT.getSizeInBits() % ScaleSVT.getSizeInBits()) && 14784 0 == (SVT.getSizeInBits() % ScaleSVT.getSizeInBits())) { 14785 14786 int InnerScale = InnerSVT.getSizeInBits() / ScaleSVT.getSizeInBits(); 14787 int OuterScale = SVT.getSizeInBits() / ScaleSVT.getSizeInBits(); 14788 14789 // Scale the shuffle masks to the smaller scalar type. 14790 ShuffleVectorSDNode *InnerSVN = cast<ShuffleVectorSDNode>(BC0); 14791 SmallVector<int, 8> InnerMask = 14792 ScaleShuffleMask(InnerSVN->getMask(), InnerScale); 14793 SmallVector<int, 8> OuterMask = 14794 ScaleShuffleMask(SVN->getMask(), OuterScale); 14795 14796 // Merge the shuffle masks. 14797 SmallVector<int, 8> NewMask; 14798 for (int M : OuterMask) 14799 NewMask.push_back(M < 0 ? -1 : InnerMask[M]); 14800 14801 // Test for shuffle mask legality over both commutations. 
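        // Worked example (illustrative): for an outer v4i32 shuffle<0,1,u,u>
        // of (bitcast (v2i64 shuffle<1,0> X, Y)), the inner mask scales to
        // <2,3,0,1>, the outer mask stays <0,1,u,u>, and the merged mask is
        // <2,3,u,u> over the bitcast inputs.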
14802 SDValue SV0 = BC0->getOperand(0); 14803 SDValue SV1 = BC0->getOperand(1); 14804 bool LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT); 14805 if (!LegalMask) { 14806 std::swap(SV0, SV1); 14807 ShuffleVectorSDNode::commuteMask(NewMask); 14808 LegalMask = TLI.isShuffleMaskLegal(NewMask, ScaleVT); 14809 } 14810 14811 if (LegalMask) { 14812 SV0 = DAG.getBitcast(ScaleVT, SV0); 14813 SV1 = DAG.getBitcast(ScaleVT, SV1); 14814 return DAG.getBitcast( 14815 VT, DAG.getVectorShuffle(ScaleVT, SDLoc(N), SV0, SV1, NewMask)); 14816 } 14817 } 14818 } 14819 } 14820 14821 // Canonicalize shuffles according to rules: 14822 // shuffle(A, shuffle(A, B)) -> shuffle(shuffle(A,B), A) 14823 // shuffle(B, shuffle(A, B)) -> shuffle(shuffle(A,B), B) 14824 // shuffle(B, shuffle(A, Undef)) -> shuffle(shuffle(A, Undef), B) 14825 if (N1.getOpcode() == ISD::VECTOR_SHUFFLE && 14826 N0.getOpcode() != ISD::VECTOR_SHUFFLE && Level < AfterLegalizeDAG && 14827 TLI.isTypeLegal(VT)) { 14828 // The incoming shuffle must be of the same type as the result of the 14829 // current shuffle. 14830 assert(N1->getOperand(0).getValueType() == VT && 14831 "Shuffle types don't match"); 14832 14833 SDValue SV0 = N1->getOperand(0); 14834 SDValue SV1 = N1->getOperand(1); 14835 bool HasSameOp0 = N0 == SV0; 14836 bool IsSV1Undef = SV1.isUndef(); 14837 if (HasSameOp0 || IsSV1Undef || N0 == SV1) 14838 // Commute the operands of this shuffle so that next rule 14839 // will trigger. 14840 return DAG.getCommutedVectorShuffle(*SVN); 14841 } 14842 14843 // Try to fold according to rules: 14844 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2) 14845 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2) 14846 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2) 14847 // Don't try to fold shuffles with illegal type. 14848 // Only fold if this shuffle is the only user of the other shuffle. 14849 if (N0.getOpcode() == ISD::VECTOR_SHUFFLE && N->isOnlyUserOf(N0.getNode()) && 14850 Level < AfterLegalizeDAG && TLI.isTypeLegal(VT)) { 14851 ShuffleVectorSDNode *OtherSV = cast<ShuffleVectorSDNode>(N0); 14852 14853 // Don't try to fold splats; they're likely to simplify somehow, or they 14854 // might be free. 14855 if (OtherSV->isSplat()) 14856 return SDValue(); 14857 14858 // The incoming shuffle must be of the same type as the result of the 14859 // current shuffle. 14860 assert(OtherSV->getOperand(0).getValueType() == VT && 14861 "Shuffle types don't match"); 14862 14863 SDValue SV0, SV1; 14864 SmallVector<int, 4> Mask; 14865 // Compute the combined shuffle mask for a shuffle with SV0 as the first 14866 // operand, and SV1 as the second operand. 14867 for (unsigned i = 0; i != NumElts; ++i) { 14868 int Idx = SVN->getMaskElt(i); 14869 if (Idx < 0) { 14870 // Propagate Undef. 14871 Mask.push_back(Idx); 14872 continue; 14873 } 14874 14875 SDValue CurrentVec; 14876 if (Idx < (int)NumElts) { 14877 // This shuffle index refers to the inner shuffle N0. Lookup the inner 14878 // shuffle mask to identify which vector is actually referenced. 14879 Idx = OtherSV->getMaskElt(Idx); 14880 if (Idx < 0) { 14881 // Propagate Undef. 14882 Mask.push_back(Idx); 14883 continue; 14884 } 14885 14886 CurrentVec = (Idx < (int) NumElts) ? OtherSV->getOperand(0) 14887 : OtherSV->getOperand(1); 14888 } else { 14889 // This shuffle index references an element within N1. 14890 CurrentVec = N1; 14891 } 14892 14893 // Simple case where 'CurrentVec' is UNDEF. 
14894 if (CurrentVec.isUndef()) { 14895 Mask.push_back(-1); 14896 continue; 14897 } 14898 14899 // Canonicalize the shuffle index. We don't know yet if CurrentVec 14900 // will be the first or second operand of the combined shuffle. 14901 Idx = Idx % NumElts; 14902 if (!SV0.getNode() || SV0 == CurrentVec) { 14903 // Ok. CurrentVec is the left hand side. 14904 // Update the mask accordingly. 14905 SV0 = CurrentVec; 14906 Mask.push_back(Idx); 14907 continue; 14908 } 14909 14910 // Bail out if we cannot convert the shuffle pair into a single shuffle. 14911 if (SV1.getNode() && SV1 != CurrentVec) 14912 return SDValue(); 14913 14914 // Ok. CurrentVec is the right hand side. 14915 // Update the mask accordingly. 14916 SV1 = CurrentVec; 14917 Mask.push_back(Idx + NumElts); 14918 } 14919 14920 // Check if all indices in Mask are Undef. In case, propagate Undef. 14921 bool isUndefMask = true; 14922 for (unsigned i = 0; i != NumElts && isUndefMask; ++i) 14923 isUndefMask &= Mask[i] < 0; 14924 14925 if (isUndefMask) 14926 return DAG.getUNDEF(VT); 14927 14928 if (!SV0.getNode()) 14929 SV0 = DAG.getUNDEF(VT); 14930 if (!SV1.getNode()) 14931 SV1 = DAG.getUNDEF(VT); 14932 14933 // Avoid introducing shuffles with illegal mask. 14934 if (!TLI.isShuffleMaskLegal(Mask, VT)) { 14935 ShuffleVectorSDNode::commuteMask(Mask); 14936 14937 if (!TLI.isShuffleMaskLegal(Mask, VT)) 14938 return SDValue(); 14939 14940 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, A, M2) 14941 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, A, M2) 14942 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(C, B, M2) 14943 std::swap(SV0, SV1); 14944 } 14945 14946 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, B, M2) 14947 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(A, C, M2) 14948 // shuffle(shuffle(A, B, M0), C, M1) -> shuffle(B, C, M2) 14949 return DAG.getVectorShuffle(VT, SDLoc(N), SV0, SV1, Mask); 14950 } 14951 14952 return SDValue(); 14953 } 14954 14955 SDValue DAGCombiner::visitSCALAR_TO_VECTOR(SDNode *N) { 14956 SDValue InVal = N->getOperand(0); 14957 EVT VT = N->getValueType(0); 14958 14959 // Replace a SCALAR_TO_VECTOR(EXTRACT_VECTOR_ELT(V,C0)) pattern 14960 // with a VECTOR_SHUFFLE. 14961 if (InVal.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 14962 SDValue InVec = InVal->getOperand(0); 14963 SDValue EltNo = InVal->getOperand(1); 14964 14965 // FIXME: We could support implicit truncation if the shuffle can be 14966 // scaled to a smaller vector scalar type. 14967 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(EltNo); 14968 if (C0 && VT == InVec.getValueType() && 14969 VT.getScalarType() == InVal.getValueType()) { 14970 SmallVector<int, 8> NewMask(VT.getVectorNumElements(), -1); 14971 int Elt = C0->getZExtValue(); 14972 NewMask[0] = Elt; 14973 14974 if (TLI.isShuffleMaskLegal(NewMask, VT)) 14975 return DAG.getVectorShuffle(VT, SDLoc(N), InVec, DAG.getUNDEF(VT), 14976 NewMask); 14977 } 14978 } 14979 14980 return SDValue(); 14981 } 14982 14983 SDValue DAGCombiner::visitINSERT_SUBVECTOR(SDNode *N) { 14984 EVT VT = N->getValueType(0); 14985 SDValue N0 = N->getOperand(0); 14986 SDValue N1 = N->getOperand(1); 14987 SDValue N2 = N->getOperand(2); 14988 14989 // If inserting an UNDEF, just return the original vector. 14990 if (N1.isUndef()) 14991 return N0; 14992 14993 // If this is an insert of an extracted vector into an undef vector, we can 14994 // just use the input to the extract. 
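  // For example (illustrative):
  //   (insert_subvector undef:v8i32, (extract_subvector X:v8i32, 4), 4) --> X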
14995 if (N0.isUndef() && N1.getOpcode() == ISD::EXTRACT_SUBVECTOR && 14996 N1.getOperand(1) == N2 && N1.getOperand(0).getValueType() == VT) 14997 return N1.getOperand(0); 14998 14999 // Combine INSERT_SUBVECTORs where we are inserting to the same index. 15000 // INSERT_SUBVECTOR( INSERT_SUBVECTOR( Vec, SubOld, Idx ), SubNew, Idx ) 15001 // --> INSERT_SUBVECTOR( Vec, SubNew, Idx ) 15002 if (N0.getOpcode() == ISD::INSERT_SUBVECTOR && 15003 N0.getOperand(1).getValueType() == N1.getValueType() && 15004 N0.getOperand(2) == N2) 15005 return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0), 15006 N1, N2); 15007 15008 if (!isa<ConstantSDNode>(N2)) 15009 return SDValue(); 15010 15011 unsigned InsIdx = cast<ConstantSDNode>(N2)->getZExtValue(); 15012 15013 // Canonicalize insert_subvector dag nodes. 15014 // Example: 15015 // (insert_subvector (insert_subvector A, Idx0), Idx1) 15016 // -> (insert_subvector (insert_subvector A, Idx1), Idx0) 15017 if (N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.hasOneUse() && 15018 N1.getValueType() == N0.getOperand(1).getValueType() && 15019 isa<ConstantSDNode>(N0.getOperand(2))) { 15020 unsigned OtherIdx = cast<ConstantSDNode>(N0.getOperand(2))->getZExtValue(); 15021 if (InsIdx < OtherIdx) { 15022 // Swap nodes. 15023 SDValue NewOp = DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N), VT, 15024 N0.getOperand(0), N1, N2); 15025 AddToWorklist(NewOp.getNode()); 15026 return DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(N0.getNode()), 15027 VT, NewOp, N0.getOperand(1), N0.getOperand(2)); 15028 } 15029 } 15030 15031 // If the input vector is a concatenation, and the insert replaces 15032 // one of the pieces, we can optimize into a single concat_vectors. 15033 if (N0.getOpcode() == ISD::CONCAT_VECTORS && N0.hasOneUse() && 15034 N0.getOperand(0).getValueType() == N1.getValueType()) { 15035 unsigned Factor = N1.getValueType().getVectorNumElements(); 15036 15037 SmallVector<SDValue, 8> Ops(N0->op_begin(), N0->op_end()); 15038 Ops[cast<ConstantSDNode>(N2)->getZExtValue() / Factor] = N1; 15039 15040 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Ops); 15041 } 15042 15043 return SDValue(); 15044 } 15045 15046 SDValue DAGCombiner::visitFP_TO_FP16(SDNode *N) { 15047 SDValue N0 = N->getOperand(0); 15048 15049 // fold (fp_to_fp16 (fp16_to_fp op)) -> op 15050 if (N0->getOpcode() == ISD::FP16_TO_FP) 15051 return N0->getOperand(0); 15052 15053 return SDValue(); 15054 } 15055 15056 SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) { 15057 SDValue N0 = N->getOperand(0); 15058 15059 // fold fp16_to_fp(op & 0xffff) -> fp16_to_fp(op) 15060 if (N0->getOpcode() == ISD::AND) { 15061 ConstantSDNode *AndConst = getAsNonOpaqueConstant(N0.getOperand(1)); 15062 if (AndConst && AndConst->getAPIntValue() == 0xffff) { 15063 return DAG.getNode(ISD::FP16_TO_FP, SDLoc(N), N->getValueType(0), 15064 N0.getOperand(0)); 15065 } 15066 } 15067 15068 return SDValue(); 15069 } 15070 15071 /// Returns a vector_shuffle if it able to transform an AND to a vector_shuffle 15072 /// with the destination vector and a zero vector. 15073 /// e.g. AND V, <0xffffffff, 0, 0xffffffff, 0>. ==> 15074 /// vector_shuffle V, Zero, <0, 4, 2, 4> 15075 SDValue DAGCombiner::XformToShuffleWithZero(SDNode *N) { 15076 EVT VT = N->getValueType(0); 15077 SDValue LHS = N->getOperand(0); 15078 SDValue RHS = N->getOperand(1); 15079 SDLoc DL(N); 15080 15081 // Make sure we're not running after operation legalization where it 15082 // may have custom lowered the vector shuffles. 
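  // A sub-element example of the same idea (illustrative, little-endian):
  //   (v2i64 and V, <0x00000000FFFFFFFF, 0xFFFFFFFF00000000>)
  // has no per-i64 clear mask, but split to i32 granularity it becomes
  //   (v2i64 bitcast (v4i32 vector_shuffle<0,5,6,3> (bitcast V), zero))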
15083 if (LegalOperations) 15084 return SDValue(); 15085 15086 if (N->getOpcode() != ISD::AND) 15087 return SDValue(); 15088 15089 if (RHS.getOpcode() == ISD::BITCAST) 15090 RHS = RHS.getOperand(0); 15091 15092 if (RHS.getOpcode() != ISD::BUILD_VECTOR) 15093 return SDValue(); 15094 15095 EVT RVT = RHS.getValueType(); 15096 unsigned NumElts = RHS.getNumOperands(); 15097 15098 // Attempt to create a valid clear mask, splitting the mask into 15099 // sub elements and checking to see if each is 15100 // all zeros or all ones - suitable for shuffle masking. 15101 auto BuildClearMask = [&](int Split) { 15102 int NumSubElts = NumElts * Split; 15103 int NumSubBits = RVT.getScalarSizeInBits() / Split; 15104 15105 SmallVector<int, 8> Indices; 15106 for (int i = 0; i != NumSubElts; ++i) { 15107 int EltIdx = i / Split; 15108 int SubIdx = i % Split; 15109 SDValue Elt = RHS.getOperand(EltIdx); 15110 if (Elt.isUndef()) { 15111 Indices.push_back(-1); 15112 continue; 15113 } 15114 15115 APInt Bits; 15116 if (isa<ConstantSDNode>(Elt)) 15117 Bits = cast<ConstantSDNode>(Elt)->getAPIntValue(); 15118 else if (isa<ConstantFPSDNode>(Elt)) 15119 Bits = cast<ConstantFPSDNode>(Elt)->getValueAPF().bitcastToAPInt(); 15120 else 15121 return SDValue(); 15122 15123 // Extract the sub element from the constant bit mask. 15124 if (DAG.getDataLayout().isBigEndian()) { 15125 Bits = Bits.lshr((Split - SubIdx - 1) * NumSubBits); 15126 } else { 15127 Bits = Bits.lshr(SubIdx * NumSubBits); 15128 } 15129 15130 if (Split > 1) 15131 Bits = Bits.trunc(NumSubBits); 15132 15133 if (Bits.isAllOnesValue()) 15134 Indices.push_back(i); 15135 else if (Bits == 0) 15136 Indices.push_back(i + NumSubElts); 15137 else 15138 return SDValue(); 15139 } 15140 15141 // Let's see if the target supports this vector_shuffle. 15142 EVT ClearSVT = EVT::getIntegerVT(*DAG.getContext(), NumSubBits); 15143 EVT ClearVT = EVT::getVectorVT(*DAG.getContext(), ClearSVT, NumSubElts); 15144 if (!TLI.isVectorClearMaskLegal(Indices, ClearVT)) 15145 return SDValue(); 15146 15147 SDValue Zero = DAG.getConstant(0, DL, ClearVT); 15148 return DAG.getBitcast(VT, DAG.getVectorShuffle(ClearVT, DL, 15149 DAG.getBitcast(ClearVT, LHS), 15150 Zero, Indices)); 15151 }; 15152 15153 // Determine maximum split level (byte level masking). 15154 int MaxSplit = 1; 15155 if (RVT.getScalarSizeInBits() % 8 == 0) 15156 MaxSplit = RVT.getScalarSizeInBits() / 8; 15157 15158 for (int Split = 1; Split <= MaxSplit; ++Split) 15159 if (RVT.getScalarSizeInBits() % Split == 0) 15160 if (SDValue S = BuildClearMask(Split)) 15161 return S; 15162 15163 return SDValue(); 15164 } 15165 15166 /// Visit a binary vector operation, like ADD. 15167 SDValue DAGCombiner::SimplifyVBinOp(SDNode *N) { 15168 assert(N->getValueType(0).isVector() && 15169 "SimplifyVBinOp only works on vectors!"); 15170 15171 SDValue LHS = N->getOperand(0); 15172 SDValue RHS = N->getOperand(1); 15173 SDValue Ops[] = {LHS, RHS}; 15174 15175 // See if we can constant fold the vector operation. 15176 if (SDValue Fold = DAG.FoldConstantVectorArithmetic( 15177 N->getOpcode(), SDLoc(LHS), LHS.getValueType(), Ops, N->getFlags())) 15178 return Fold; 15179 15180 // Try to convert a constant mask AND into a shuffle clear mask. 15181 if (SDValue Shuffle = XformToShuffleWithZero(N)) 15182 return Shuffle; 15183 15184 // Type legalization might introduce new shuffles in the DAG. 15185 // Fold (VBinOp (shuffle (A, Undef, Mask)), (shuffle (B, Undef, Mask))) 15186 // -> (shuffle (VBinOp (A, B)), Undef, Mask). 
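  // e.g. (illustrative)
  //   (add (shuffle<1,0> A, undef), (shuffle<1,0> B, undef))
  //     -> (shuffle<1,0> (add A, B), undef)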
15187 if (LegalTypes && isa<ShuffleVectorSDNode>(LHS) && 15188 isa<ShuffleVectorSDNode>(RHS) && LHS.hasOneUse() && RHS.hasOneUse() && 15189 LHS.getOperand(1).isUndef() && 15190 RHS.getOperand(1).isUndef()) { 15191 ShuffleVectorSDNode *SVN0 = cast<ShuffleVectorSDNode>(LHS); 15192 ShuffleVectorSDNode *SVN1 = cast<ShuffleVectorSDNode>(RHS); 15193 15194 if (SVN0->getMask().equals(SVN1->getMask())) { 15195 EVT VT = N->getValueType(0); 15196 SDValue UndefVector = LHS.getOperand(1); 15197 SDValue NewBinOp = DAG.getNode(N->getOpcode(), SDLoc(N), VT, 15198 LHS.getOperand(0), RHS.getOperand(0), 15199 N->getFlags()); 15200 AddUsersToWorklist(N); 15201 return DAG.getVectorShuffle(VT, SDLoc(N), NewBinOp, UndefVector, 15202 SVN0->getMask()); 15203 } 15204 } 15205 15206 return SDValue(); 15207 } 15208 15209 SDValue DAGCombiner::SimplifySelect(const SDLoc &DL, SDValue N0, SDValue N1, 15210 SDValue N2) { 15211 assert(N0.getOpcode() ==ISD::SETCC && "First argument must be a SetCC node!"); 15212 15213 SDValue SCC = SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1), N1, N2, 15214 cast<CondCodeSDNode>(N0.getOperand(2))->get()); 15215 15216 // If we got a simplified select_cc node back from SimplifySelectCC, then 15217 // break it down into a new SETCC node, and a new SELECT node, and then return 15218 // the SELECT node, since we were called with a SELECT node. 15219 if (SCC.getNode()) { 15220 // Check to see if we got a select_cc back (to turn into setcc/select). 15221 // Otherwise, just return whatever node we got back, like fabs. 15222 if (SCC.getOpcode() == ISD::SELECT_CC) { 15223 SDValue SETCC = DAG.getNode(ISD::SETCC, SDLoc(N0), 15224 N0.getValueType(), 15225 SCC.getOperand(0), SCC.getOperand(1), 15226 SCC.getOperand(4)); 15227 AddToWorklist(SETCC.getNode()); 15228 return DAG.getSelect(SDLoc(SCC), SCC.getValueType(), SETCC, 15229 SCC.getOperand(2), SCC.getOperand(3)); 15230 } 15231 15232 return SCC; 15233 } 15234 return SDValue(); 15235 } 15236 15237 /// Given a SELECT or a SELECT_CC node, where LHS and RHS are the two values 15238 /// being selected between, see if we can simplify the select. Callers of this 15239 /// should assume that TheSelect is deleted if this returns true. As such, they 15240 /// should return the appropriate thing (e.g. the node) back to the top-level of 15241 /// the DAG combiner loop to avoid it being looked at. 15242 bool DAGCombiner::SimplifySelectOps(SDNode *TheSelect, SDValue LHS, 15243 SDValue RHS) { 15244 15245 // fold (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x)) 15246 // The select + setcc is redundant, because fsqrt returns NaN for X < 0. 
15247 if (const ConstantFPSDNode *NaN = isConstOrConstSplatFP(LHS)) { 15248 if (NaN->isNaN() && RHS.getOpcode() == ISD::FSQRT) { 15249 // We have: (select (setcc ?, ?, ?), NaN, (fsqrt ?)) 15250 SDValue Sqrt = RHS; 15251 ISD::CondCode CC; 15252 SDValue CmpLHS; 15253 const ConstantFPSDNode *Zero = nullptr; 15254 15255 if (TheSelect->getOpcode() == ISD::SELECT_CC) { 15256 CC = dyn_cast<CondCodeSDNode>(TheSelect->getOperand(4))->get(); 15257 CmpLHS = TheSelect->getOperand(0); 15258 Zero = isConstOrConstSplatFP(TheSelect->getOperand(1)); 15259 } else { 15260 // SELECT or VSELECT 15261 SDValue Cmp = TheSelect->getOperand(0); 15262 if (Cmp.getOpcode() == ISD::SETCC) { 15263 CC = dyn_cast<CondCodeSDNode>(Cmp.getOperand(2))->get(); 15264 CmpLHS = Cmp.getOperand(0); 15265 Zero = isConstOrConstSplatFP(Cmp.getOperand(1)); 15266 } 15267 } 15268 if (Zero && Zero->isZero() && 15269 Sqrt.getOperand(0) == CmpLHS && (CC == ISD::SETOLT || 15270 CC == ISD::SETULT || CC == ISD::SETLT)) { 15271 // We have: (select (setcc x, [+-]0.0, *lt), NaN, (fsqrt x)) 15272 CombineTo(TheSelect, Sqrt); 15273 return true; 15274 } 15275 } 15276 } 15277 // Cannot simplify select with vector condition 15278 if (TheSelect->getOperand(0).getValueType().isVector()) return false; 15279 15280 // If this is a select from two identical things, try to pull the operation 15281 // through the select. 15282 if (LHS.getOpcode() != RHS.getOpcode() || 15283 !LHS.hasOneUse() || !RHS.hasOneUse()) 15284 return false; 15285 15286 // If this is a load and the token chain is identical, replace the select 15287 // of two loads with a load through a select of the address to load from. 15288 // This triggers in things like "select bool X, 10.0, 123.0" after the FP 15289 // constants have been dropped into the constant pool. 15290 if (LHS.getOpcode() == ISD::LOAD) { 15291 LoadSDNode *LLD = cast<LoadSDNode>(LHS); 15292 LoadSDNode *RLD = cast<LoadSDNode>(RHS); 15293 15294 // Token chains must be identical. 15295 if (LHS.getOperand(0) != RHS.getOperand(0) || 15296 // Do not let this transformation reduce the number of volatile loads. 15297 LLD->isVolatile() || RLD->isVolatile() || 15298 // FIXME: If either is a pre/post inc/dec load, 15299 // we'd need to split out the address adjustment. 15300 LLD->isIndexed() || RLD->isIndexed() || 15301 // If this is an EXTLOAD, the VT's must match. 15302 LLD->getMemoryVT() != RLD->getMemoryVT() || 15303 // If this is an EXTLOAD, the kind of extension must match. 15304 (LLD->getExtensionType() != RLD->getExtensionType() && 15305 // The only exception is if one of the extensions is anyext. 15306 LLD->getExtensionType() != ISD::EXTLOAD && 15307 RLD->getExtensionType() != ISD::EXTLOAD) || 15308 // FIXME: this discards src value information. This is 15309 // over-conservative. It would be beneficial to be able to remember 15310 // both potential memory locations. Since we are discarding 15311 // src value info, don't do the transformation if the memory 15312 // locations are not in the default address space. 15313 LLD->getPointerInfo().getAddrSpace() != 0 || 15314 RLD->getPointerInfo().getAddrSpace() != 0 || 15315 !TLI.isOperationLegalOrCustom(TheSelect->getOpcode(), 15316 LLD->getBasePtr().getValueType())) 15317 return false; 15318 15319 // Check that the select condition doesn't reach either load. If so, 15320 // folding this will induce a cycle into the DAG. If not, this is safe to 15321 // xform, so create a select of the addresses. 
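// Sketch of the rewrite performed below (illustrative):
//   select Cond, (load AddrA), (load AddrB)
//     -->
//   load (select Cond, AddrA, AddrB)
// i.e. a single load through a selected address replaces two loads feeding a
// select. The checks above and the cycle checks that follow ensure this is
// safe.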
15322 SDValue Addr; 15323 if (TheSelect->getOpcode() == ISD::SELECT) { 15324 SDNode *CondNode = TheSelect->getOperand(0).getNode(); 15325 if ((LLD->hasAnyUseOfValue(1) && LLD->isPredecessorOf(CondNode)) || 15326 (RLD->hasAnyUseOfValue(1) && RLD->isPredecessorOf(CondNode))) 15327 return false; 15328 // The loads must not depend on one another. 15329 if (LLD->isPredecessorOf(RLD) || 15330 RLD->isPredecessorOf(LLD)) 15331 return false; 15332 Addr = DAG.getSelect(SDLoc(TheSelect), 15333 LLD->getBasePtr().getValueType(), 15334 TheSelect->getOperand(0), LLD->getBasePtr(), 15335 RLD->getBasePtr()); 15336 } else { // Otherwise SELECT_CC 15337 SDNode *CondLHS = TheSelect->getOperand(0).getNode(); 15338 SDNode *CondRHS = TheSelect->getOperand(1).getNode(); 15339 15340 if ((LLD->hasAnyUseOfValue(1) && 15341 (LLD->isPredecessorOf(CondLHS) || LLD->isPredecessorOf(CondRHS))) || 15342 (RLD->hasAnyUseOfValue(1) && 15343 (RLD->isPredecessorOf(CondLHS) || RLD->isPredecessorOf(CondRHS)))) 15344 return false; 15345 15346 Addr = DAG.getNode(ISD::SELECT_CC, SDLoc(TheSelect), 15347 LLD->getBasePtr().getValueType(), 15348 TheSelect->getOperand(0), 15349 TheSelect->getOperand(1), 15350 LLD->getBasePtr(), RLD->getBasePtr(), 15351 TheSelect->getOperand(4)); 15352 } 15353 15354 SDValue Load; 15355 // It is safe to replace the two loads if they have different alignments, 15356 // but the new load must be the minimum (most restrictive) alignment of the 15357 // inputs. 15358 unsigned Alignment = std::min(LLD->getAlignment(), RLD->getAlignment()); 15359 MachineMemOperand::Flags MMOFlags = LLD->getMemOperand()->getFlags(); 15360 if (!RLD->isInvariant()) 15361 MMOFlags &= ~MachineMemOperand::MOInvariant; 15362 if (!RLD->isDereferenceable()) 15363 MMOFlags &= ~MachineMemOperand::MODereferenceable; 15364 if (LLD->getExtensionType() == ISD::NON_EXTLOAD) { 15365 // FIXME: Discards pointer and AA info. 15366 Load = DAG.getLoad(TheSelect->getValueType(0), SDLoc(TheSelect), 15367 LLD->getChain(), Addr, MachinePointerInfo(), Alignment, 15368 MMOFlags); 15369 } else { 15370 // FIXME: Discards pointer and AA info. 15371 Load = DAG.getExtLoad( 15372 LLD->getExtensionType() == ISD::EXTLOAD ? RLD->getExtensionType() 15373 : LLD->getExtensionType(), 15374 SDLoc(TheSelect), TheSelect->getValueType(0), LLD->getChain(), Addr, 15375 MachinePointerInfo(), LLD->getMemoryVT(), Alignment, MMOFlags); 15376 } 15377 15378 // Users of the select now use the result of the load. 15379 CombineTo(TheSelect, Load); 15380 15381 // Users of the old loads now use the new load's chain. We know the 15382 // old-load value is dead now. 15383 CombineTo(LHS.getNode(), Load.getValue(0), Load.getValue(1)); 15384 CombineTo(RHS.getNode(), Load.getValue(0), Load.getValue(1)); 15385 return true; 15386 } 15387 15388 return false; 15389 } 15390 15391 /// Try to fold an expression of the form (N0 cond N1) ? N2 : N3 to a shift and 15392 /// bitwise 'and'. 
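/// For example (illustrative, 32-bit X):
///   select_cc setlt X, 0, A, 0  -->  and (sra X, 31), A
/// because (sra X, 31) is all-ones exactly when X is negative and zero
/// otherwise.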
15393 SDValue DAGCombiner::foldSelectCCToShiftAnd(const SDLoc &DL, SDValue N0, 15394 SDValue N1, SDValue N2, SDValue N3, 15395 ISD::CondCode CC) { 15396 // If this is a select where the false operand is zero and the compare is a 15397 // check of the sign bit, see if we can perform the "gzip trick": 15398 // select_cc setlt X, 0, A, 0 -> and (sra X, size(X)-1), A 15399 // select_cc setgt X, 0, A, 0 -> and (not (sra X, size(X)-1)), A 15400 EVT XType = N0.getValueType(); 15401 EVT AType = N2.getValueType(); 15402 if (!isNullConstant(N3) || !XType.bitsGE(AType)) 15403 return SDValue(); 15404 15405 // If the comparison is testing for a positive value, we have to invert 15406 // the sign bit mask, so only do that transform if the target has a bitwise 15407 // 'and not' instruction (the invert is free). 15408 if (CC == ISD::SETGT && TLI.hasAndNot(N2)) { 15409 // (X > -1) ? A : 0 15410 // (X > 0) ? X : 0 <-- This is canonical signed max. 15411 if (!(isAllOnesConstant(N1) || (isNullConstant(N1) && N0 == N2))) 15412 return SDValue(); 15413 } else if (CC == ISD::SETLT) { 15414 // (X < 0) ? A : 0 15415 // (X < 1) ? X : 0 <-- This is un-canonicalized signed min. 15416 if (!(isNullConstant(N1) || (isOneConstant(N1) && N0 == N2))) 15417 return SDValue(); 15418 } else { 15419 return SDValue(); 15420 } 15421 15422 // and (sra X, size(X)-1), A -> "and (srl X, C2), A" iff A is a single-bit 15423 // constant. 15424 EVT ShiftAmtTy = getShiftAmountTy(N0.getValueType()); 15425 auto *N2C = dyn_cast<ConstantSDNode>(N2.getNode()); 15426 if (N2C && ((N2C->getAPIntValue() & (N2C->getAPIntValue() - 1)) == 0)) { 15427 unsigned ShCt = XType.getSizeInBits() - N2C->getAPIntValue().logBase2() - 1; 15428 SDValue ShiftAmt = DAG.getConstant(ShCt, DL, ShiftAmtTy); 15429 SDValue Shift = DAG.getNode(ISD::SRL, DL, XType, N0, ShiftAmt); 15430 AddToWorklist(Shift.getNode()); 15431 15432 if (XType.bitsGT(AType)) { 15433 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift); 15434 AddToWorklist(Shift.getNode()); 15435 } 15436 15437 if (CC == ISD::SETGT) 15438 Shift = DAG.getNOT(DL, Shift, AType); 15439 15440 return DAG.getNode(ISD::AND, DL, AType, Shift, N2); 15441 } 15442 15443 SDValue ShiftAmt = DAG.getConstant(XType.getSizeInBits() - 1, DL, ShiftAmtTy); 15444 SDValue Shift = DAG.getNode(ISD::SRA, DL, XType, N0, ShiftAmt); 15445 AddToWorklist(Shift.getNode()); 15446 15447 if (XType.bitsGT(AType)) { 15448 Shift = DAG.getNode(ISD::TRUNCATE, DL, AType, Shift); 15449 AddToWorklist(Shift.getNode()); 15450 } 15451 15452 if (CC == ISD::SETGT) 15453 Shift = DAG.getNOT(DL, Shift, AType); 15454 15455 return DAG.getNode(ISD::AND, DL, AType, Shift, N2); 15456 } 15457 15458 /// Simplify an expression of the form (N0 cond N1) ? N2 : N3 15459 /// where 'cond' is the comparison specified by CC. 15460 SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1, 15461 SDValue N2, SDValue N3, ISD::CondCode CC, 15462 bool NotExtCompare) { 15463 // (x ? y : y) -> y. 
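// If both selected values are the same node, the comparison result cannot
// matter, so the whole select_cc collapses to that value.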
15464 if (N2 == N3) return N2; 15465 15466 EVT VT = N2.getValueType(); 15467 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode()); 15468 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode()); 15469 15470 // Determine if the condition we're dealing with is constant 15471 SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()), 15472 N0, N1, CC, DL, false); 15473 if (SCC.getNode()) AddToWorklist(SCC.getNode()); 15474 15475 if (ConstantSDNode *SCCC = dyn_cast_or_null<ConstantSDNode>(SCC.getNode())) { 15476 // fold select_cc true, x, y -> x 15477 // fold select_cc false, x, y -> y 15478 return !SCCC->isNullValue() ? N2 : N3; 15479 } 15480 15481 // Check to see if we can simplify the select into an fabs node 15482 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1)) { 15483 // Allow either -0.0 or 0.0 15484 if (CFP->isZero()) { 15485 // select (setg[te] X, +/-0.0), X, fneg(X) -> fabs 15486 if ((CC == ISD::SETGE || CC == ISD::SETGT) && 15487 N0 == N2 && N3.getOpcode() == ISD::FNEG && 15488 N2 == N3.getOperand(0)) 15489 return DAG.getNode(ISD::FABS, DL, VT, N0); 15490 15491 // select (setl[te] X, +/-0.0), fneg(X), X -> fabs 15492 if ((CC == ISD::SETLT || CC == ISD::SETLE) && 15493 N0 == N3 && N2.getOpcode() == ISD::FNEG && 15494 N2.getOperand(0) == N3) 15495 return DAG.getNode(ISD::FABS, DL, VT, N3); 15496 } 15497 } 15498 15499 // Turn "(a cond b) ? 1.0f : 2.0f" into "load (tmp + ((a cond b) ? 0 : 4)" 15500 // where "tmp" is a constant pool entry containing an array with 1.0 and 2.0 15501 // in it. This is a win when the constant is not otherwise available because 15502 // it replaces two constant pool loads with one. We only do this if the FP 15503 // type is known to be legal, because if it isn't, then we are before legalize 15504 // types an we want the other legalization to happen first (e.g. to avoid 15505 // messing with soft float) and if the ConstantFP is not legal, because if 15506 // it is legal, we may not need to store the FP constant in a constant pool. 15507 if (ConstantFPSDNode *TV = dyn_cast<ConstantFPSDNode>(N2)) 15508 if (ConstantFPSDNode *FV = dyn_cast<ConstantFPSDNode>(N3)) { 15509 if (TLI.isTypeLegal(N2.getValueType()) && 15510 (TLI.getOperationAction(ISD::ConstantFP, N2.getValueType()) != 15511 TargetLowering::Legal && 15512 !TLI.isFPImmLegal(TV->getValueAPF(), TV->getValueType(0)) && 15513 !TLI.isFPImmLegal(FV->getValueAPF(), FV->getValueType(0))) && 15514 // If both constants have multiple uses, then we won't need to do an 15515 // extra load, they are likely around in registers for other users. 15516 (TV->hasOneUse() || FV->hasOneUse())) { 15517 Constant *Elts[] = { 15518 const_cast<ConstantFP*>(FV->getConstantFPValue()), 15519 const_cast<ConstantFP*>(TV->getConstantFPValue()) 15520 }; 15521 Type *FPTy = Elts[0]->getType(); 15522 const DataLayout &TD = DAG.getDataLayout(); 15523 15524 // Create a ConstantArray of the two constants. 15525 Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts); 15526 SDValue CPIdx = 15527 DAG.getConstantPool(CA, TLI.getPointerTy(DAG.getDataLayout()), 15528 TD.getPrefTypeAlignment(FPTy)); 15529 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment(); 15530 15531 // Get the offsets to the 0 and 1 element of the array so that we can 15532 // select between them. 
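// Illustrative layout for f32 (EltSize == 4):
//   CPIdx   -> constant pool array { FV, TV }
//   offset  =  (N0 cond N1) ? 4 : 0
//   result  =  load (CPIdx + offset)
// so selecting between the two FP immediates becomes a single indexed load
// from the constant pool.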
15533 SDValue Zero = DAG.getIntPtrConstant(0, DL); 15534 unsigned EltSize = (unsigned)TD.getTypeAllocSize(Elts[0]->getType()); 15535 SDValue One = DAG.getIntPtrConstant(EltSize, SDLoc(FV)); 15536 15537 SDValue Cond = DAG.getSetCC(DL, 15538 getSetCCResultType(N0.getValueType()), 15539 N0, N1, CC); 15540 AddToWorklist(Cond.getNode()); 15541 SDValue CstOffset = DAG.getSelect(DL, Zero.getValueType(), 15542 Cond, One, Zero); 15543 AddToWorklist(CstOffset.getNode()); 15544 CPIdx = DAG.getNode(ISD::ADD, DL, CPIdx.getValueType(), CPIdx, 15545 CstOffset); 15546 AddToWorklist(CPIdx.getNode()); 15547 return DAG.getLoad( 15548 TV->getValueType(0), DL, DAG.getEntryNode(), CPIdx, 15549 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), 15550 Alignment); 15551 } 15552 } 15553 15554 if (SDValue V = foldSelectCCToShiftAnd(DL, N0, N1, N2, N3, CC)) 15555 return V; 15556 15557 // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A) 15558 // where y has a single bit set. 15559 // Put plainly, we can turn the SELECT_CC into an AND 15560 // when the condition can be materialized as an all-ones register. Any 15561 // single bit-test can be materialized as an all-ones register with 15562 // shift-left and shift-right-arith. 15563 if (CC == ISD::SETEQ && N0->getOpcode() == ISD::AND && 15564 N0->getValueType(0) == VT && isNullConstant(N1) && isNullConstant(N2)) { 15565 SDValue AndLHS = N0->getOperand(0); 15566 ConstantSDNode *ConstAndRHS = dyn_cast<ConstantSDNode>(N0->getOperand(1)); 15567 if (ConstAndRHS && ConstAndRHS->getAPIntValue().countPopulation() == 1) { 15568 // Shift the tested bit over the sign bit. 15569 const APInt &AndMask = ConstAndRHS->getAPIntValue(); 15570 SDValue ShlAmt = 15571 DAG.getConstant(AndMask.countLeadingZeros(), SDLoc(AndLHS), 15572 getShiftAmountTy(AndLHS.getValueType())); 15573 SDValue Shl = DAG.getNode(ISD::SHL, SDLoc(N0), VT, AndLHS, ShlAmt); 15574 15575 // Now arithmetic right shift it all the way over, so the result is either 15576 // all-ones or zero. 15577 SDValue ShrAmt = 15578 DAG.getConstant(AndMask.getBitWidth() - 1, SDLoc(Shl), 15579 getShiftAmountTy(Shl.getValueType())); 15580 SDValue Shr = DAG.getNode(ISD::SRA, SDLoc(N0), VT, Shl, ShrAmt); 15581 15582 return DAG.getNode(ISD::AND, DL, VT, Shr, N3); 15583 } 15584 } 15585 15586 // fold select C, 16, 0 -> shl C, 4 15587 if (N2C && isNullConstant(N3) && N2C->getAPIntValue().isPowerOf2() && 15588 TLI.getBooleanContents(N0.getValueType()) == 15589 TargetLowering::ZeroOrOneBooleanContent) { 15590 15591 // If the caller doesn't want us to simplify this into a zext of a compare, 15592 // don't do it. 15593 if (NotExtCompare && N2C->isOne()) 15594 return SDValue(); 15595 15596 // Get a SetCC of the condition 15597 // NOTE: Don't create a SETCC if it's not legal on this target.
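// For instance (illustrative), with zero-or-one boolean contents:
//   select (setcc a, b, cc), 16, 0
// can be emitted as
//   shl (zext (setcc a, b, cc)), 4
// since the setcc result is 0 or 1 and 16 == 1 << 4.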
15598 if (!LegalOperations || 15599 TLI.isOperationLegal(ISD::SETCC, N0.getValueType())) { 15600 SDValue Temp, SCC; 15601 // cast from setcc result type to select result type 15602 if (LegalTypes) { 15603 SCC = DAG.getSetCC(DL, getSetCCResultType(N0.getValueType()), 15604 N0, N1, CC); 15605 if (N2.getValueType().bitsLT(SCC.getValueType())) 15606 Temp = DAG.getZeroExtendInReg(SCC, SDLoc(N2), 15607 N2.getValueType()); 15608 else 15609 Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2), 15610 N2.getValueType(), SCC); 15611 } else { 15612 SCC = DAG.getSetCC(SDLoc(N0), MVT::i1, N0, N1, CC); 15613 Temp = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N2), 15614 N2.getValueType(), SCC); 15615 } 15616 15617 AddToWorklist(SCC.getNode()); 15618 AddToWorklist(Temp.getNode()); 15619 15620 if (N2C->isOne()) 15621 return Temp; 15622 15623 // shl setcc result by log2 n2c 15624 return DAG.getNode( 15625 ISD::SHL, DL, N2.getValueType(), Temp, 15626 DAG.getConstant(N2C->getAPIntValue().logBase2(), SDLoc(Temp), 15627 getShiftAmountTy(Temp.getValueType()))); 15628 } 15629 } 15630 15631 // Check to see if this is an integer abs. 15632 // select_cc setg[te] X, 0, X, -X -> 15633 // select_cc setgt X, -1, X, -X -> 15634 // select_cc setl[te] X, 0, -X, X -> 15635 // select_cc setlt X, 1, -X, X -> 15636 // Y = sra (X, size(X)-1); xor (add (X, Y), Y) 15637 if (N1C) { 15638 ConstantSDNode *SubC = nullptr; 15639 if (((N1C->isNullValue() && (CC == ISD::SETGT || CC == ISD::SETGE)) || 15640 (N1C->isAllOnesValue() && CC == ISD::SETGT)) && 15641 N0 == N2 && N3.getOpcode() == ISD::SUB && N0 == N3.getOperand(1)) 15642 SubC = dyn_cast<ConstantSDNode>(N3.getOperand(0)); 15643 else if (((N1C->isNullValue() && (CC == ISD::SETLT || CC == ISD::SETLE)) || 15644 (N1C->isOne() && CC == ISD::SETLT)) && 15645 N0 == N3 && N2.getOpcode() == ISD::SUB && N0 == N2.getOperand(1)) 15646 SubC = dyn_cast<ConstantSDNode>(N2.getOperand(0)); 15647 15648 EVT XType = N0.getValueType(); 15649 if (SubC && SubC->isNullValue() && XType.isInteger()) { 15650 SDLoc DL(N0); 15651 SDValue Shift = DAG.getNode(ISD::SRA, DL, XType, 15652 N0, 15653 DAG.getConstant(XType.getSizeInBits() - 1, DL, 15654 getShiftAmountTy(N0.getValueType()))); 15655 SDValue Add = DAG.getNode(ISD::ADD, DL, 15656 XType, N0, Shift); 15657 AddToWorklist(Shift.getNode()); 15658 AddToWorklist(Add.getNode()); 15659 return DAG.getNode(ISD::XOR, DL, XType, Add, Shift); 15660 } 15661 } 15662 15663 // select_cc seteq X, 0, sizeof(X), ctlz(X) -> ctlz(X) 15664 // select_cc seteq X, 0, sizeof(X), ctlz_zero_undef(X) -> ctlz(X) 15665 // select_cc seteq X, 0, sizeof(X), cttz(X) -> cttz(X) 15666 // select_cc seteq X, 0, sizeof(X), cttz_zero_undef(X) -> cttz(X) 15667 // select_cc setne X, 0, ctlz(X), sizeof(X) -> ctlz(X) 15668 // select_cc setne X, 0, ctlz_zero_undef(X), sizeof(X) -> ctlz(X) 15669 // select_cc setne X, 0, cttz(X), sizeof(X) -> cttz(X) 15670 // select_cc setne X, 0, cttz_zero_undef(X), sizeof(X) -> cttz(X) 15671 if (N1C && N1C->isNullValue() && (CC == ISD::SETEQ || CC == ISD::SETNE)) { 15672 SDValue ValueOnZero = N2; 15673 SDValue Count = N3; 15674 // If the condition is NE instead of E, swap the operands. 15675 if (CC == ISD::SETNE) 15676 std::swap(ValueOnZero, Count); 15677 // Check if the value on zero is a constant equal to the bits in the type. 15678 if (auto *ValueOnZeroC = dyn_cast<ConstantSDNode>(ValueOnZero)) { 15679 if (ValueOnZeroC->getAPIntValue() == VT.getSizeInBits()) { 15680 // If the other operand is cttz/cttz_zero_undef of N0, and cttz is 15681 // legal, combine to just cttz. 
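// For instance (illustrative, i32 X):
//   select_cc seteq X, 0, 32, (cttz_zero_undef X)  -->  cttz X
// because plain CTTZ is defined to return the operand bit width when X is
// zero, which is exactly the value the select supplies for that case.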
15682 if ((Count.getOpcode() == ISD::CTTZ || 15683 Count.getOpcode() == ISD::CTTZ_ZERO_UNDEF) && 15684 N0 == Count.getOperand(0) && 15685 (!LegalOperations || TLI.isOperationLegal(ISD::CTTZ, VT))) 15686 return DAG.getNode(ISD::CTTZ, DL, VT, N0); 15687 // If the other operand is ctlz/ctlz_zero_undef of N0, and ctlz is 15688 // legal, combine to just ctlz. 15689 if ((Count.getOpcode() == ISD::CTLZ || 15690 Count.getOpcode() == ISD::CTLZ_ZERO_UNDEF) && 15691 N0 == Count.getOperand(0) && 15692 (!LegalOperations || TLI.isOperationLegal(ISD::CTLZ, VT))) 15693 return DAG.getNode(ISD::CTLZ, DL, VT, N0); 15694 } 15695 } 15696 } 15697 15698 return SDValue(); 15699 } 15700 15701 /// This is a stub for TargetLowering::SimplifySetCC. 15702 SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 15703 ISD::CondCode Cond, const SDLoc &DL, 15704 bool foldBooleans) { 15705 TargetLowering::DAGCombinerInfo 15706 DagCombineInfo(DAG, Level, false, this); 15707 return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL); 15708 } 15709 15710 /// Given an ISD::SDIV node expressing a divide by constant, return 15711 /// a DAG expression to select that will generate the same value by multiplying 15712 /// by a magic number. 15713 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 15714 SDValue DAGCombiner::BuildSDIV(SDNode *N) { 15715 // when optimising for minimum size, we don't want to expand a div to a mul 15716 // and a shift. 15717 if (DAG.getMachineFunction().getFunction()->optForMinSize()) 15718 return SDValue(); 15719 15720 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1)); 15721 if (!C) 15722 return SDValue(); 15723 15724 // Avoid division by zero. 15725 if (C->isNullValue()) 15726 return SDValue(); 15727 15728 std::vector<SDNode*> Built; 15729 SDValue S = 15730 TLI.BuildSDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built); 15731 15732 for (SDNode *N : Built) 15733 AddToWorklist(N); 15734 return S; 15735 } 15736 15737 /// Given an ISD::SDIV node expressing a divide by constant power of 2, return a 15738 /// DAG expression that will generate the same value by right shifting. 15739 SDValue DAGCombiner::BuildSDIVPow2(SDNode *N) { 15740 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1)); 15741 if (!C) 15742 return SDValue(); 15743 15744 // Avoid division by zero. 15745 if (C->isNullValue()) 15746 return SDValue(); 15747 15748 std::vector<SDNode *> Built; 15749 SDValue S = TLI.BuildSDIVPow2(N, C->getAPIntValue(), DAG, &Built); 15750 15751 for (SDNode *N : Built) 15752 AddToWorklist(N); 15753 return S; 15754 } 15755 15756 /// Given an ISD::UDIV node expressing a divide by constant, return a DAG 15757 /// expression that will generate the same value by multiplying by a magic 15758 /// number. 15759 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 15760 SDValue DAGCombiner::BuildUDIV(SDNode *N) { 15761 // when optimising for minimum size, we don't want to expand a div to a mul 15762 // and a shift. 15763 if (DAG.getMachineFunction().getFunction()->optForMinSize()) 15764 return SDValue(); 15765 15766 ConstantSDNode *C = isConstOrConstSplat(N->getOperand(1)); 15767 if (!C) 15768 return SDValue(); 15769 15770 // Avoid division by zero. 
15771 if (C->isNullValue()) 15772 return SDValue(); 15773 15774 std::vector<SDNode*> Built; 15775 SDValue S = 15776 TLI.BuildUDIV(N, C->getAPIntValue(), DAG, LegalOperations, &Built); 15777 15778 for (SDNode *N : Built) 15779 AddToWorklist(N); 15780 return S; 15781 } 15782 15783 /// Determines the LogBase2 value for a non-null input value using the 15784 /// transform: LogBase2(V) = (EltBits - 1) - ctlz(V). 15785 SDValue DAGCombiner::BuildLogBase2(SDValue V, const SDLoc &DL) { 15786 EVT VT = V.getValueType(); 15787 unsigned EltBits = VT.getScalarSizeInBits(); 15788 SDValue Ctlz = DAG.getNode(ISD::CTLZ, DL, VT, V); 15789 SDValue Base = DAG.getConstant(EltBits - 1, DL, VT); 15790 SDValue LogBase2 = DAG.getNode(ISD::SUB, DL, VT, Base, Ctlz); 15791 return LogBase2; 15792 } 15793 15794 /// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 15795 /// For the reciprocal, we need to find the zero of the function: 15796 /// F(X) = A X - 1 [which has a zero at X = 1/A] 15797 /// => 15798 /// X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form 15799 /// does not require additional intermediate precision] 15800 SDValue DAGCombiner::BuildReciprocalEstimate(SDValue Op, SDNodeFlags *Flags) { 15801 if (Level >= AfterLegalizeDAG) 15802 return SDValue(); 15803 15804 // TODO: Handle half and/or extended types? 15805 EVT VT = Op.getValueType(); 15806 if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64) 15807 return SDValue(); 15808 15809 // If estimates are explicitly disabled for this function, we're done. 15810 MachineFunction &MF = DAG.getMachineFunction(); 15811 int Enabled = TLI.getRecipEstimateDivEnabled(VT, MF); 15812 if (Enabled == TLI.ReciprocalEstimate::Disabled) 15813 return SDValue(); 15814 15815 // Estimates may be explicitly enabled for this type with a custom number of 15816 // refinement steps. 15817 int Iterations = TLI.getDivRefinementSteps(VT, MF); 15818 if (SDValue Est = TLI.getRecipEstimate(Op, DAG, Enabled, Iterations)) { 15819 AddToWorklist(Est.getNode()); 15820 15821 if (Iterations) { 15822 EVT VT = Op.getValueType(); 15823 SDLoc DL(Op); 15824 SDValue FPOne = DAG.getConstantFP(1.0, DL, VT); 15825 15826 // Newton iterations: Est = Est + Est (1 - Arg * Est) 15827 for (int i = 0; i < Iterations; ++i) { 15828 SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Op, Est, Flags); 15829 AddToWorklist(NewEst.getNode()); 15830 15831 NewEst = DAG.getNode(ISD::FSUB, DL, VT, FPOne, NewEst, Flags); 15832 AddToWorklist(NewEst.getNode()); 15833 15834 NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags); 15835 AddToWorklist(NewEst.getNode()); 15836 15837 Est = DAG.getNode(ISD::FADD, DL, VT, Est, NewEst, Flags); 15838 AddToWorklist(Est.getNode()); 15839 } 15840 } 15841 return Est; 15842 } 15843 15844 return SDValue(); 15845 } 15846 15847 /// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 15848 /// For the reciprocal sqrt, we need to find the zero of the function: 15849 /// F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)] 15850 /// => 15851 /// X_{i+1} = X_i (1.5 - A X_i^2 / 2) 15852 /// As a result, we precompute A/2 prior to the iteration loop. 
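/// Derivation sketch: with F(X) = 1/X^2 - A and F'(X) = -2/X^3,
///   X_{i+1} = X_i - F(X_i)/F'(X_i)
///           = X_i + (1/X_i^2 - A) * X_i^3 / 2
///           = X_i * (3 - A * X_i^2) / 2
///           = X_i * (1.5 - (A/2) * X_i^2)
/// which is the update computed by the loop below.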
15853 SDValue DAGCombiner::buildSqrtNROneConst(SDValue Arg, SDValue Est, 15854 unsigned Iterations, 15855 SDNodeFlags *Flags, bool Reciprocal) { 15856 EVT VT = Arg.getValueType(); 15857 SDLoc DL(Arg); 15858 SDValue ThreeHalves = DAG.getConstantFP(1.5, DL, VT); 15859 15860 // We now need 0.5 * Arg which we can write as (1.5 * Arg - Arg) so that 15861 // this entire sequence requires only one FP constant. 15862 SDValue HalfArg = DAG.getNode(ISD::FMUL, DL, VT, ThreeHalves, Arg, Flags); 15863 AddToWorklist(HalfArg.getNode()); 15864 15865 HalfArg = DAG.getNode(ISD::FSUB, DL, VT, HalfArg, Arg, Flags); 15866 AddToWorklist(HalfArg.getNode()); 15867 15868 // Newton iterations: Est = Est * (1.5 - HalfArg * Est * Est) 15869 for (unsigned i = 0; i < Iterations; ++i) { 15870 SDValue NewEst = DAG.getNode(ISD::FMUL, DL, VT, Est, Est, Flags); 15871 AddToWorklist(NewEst.getNode()); 15872 15873 NewEst = DAG.getNode(ISD::FMUL, DL, VT, HalfArg, NewEst, Flags); 15874 AddToWorklist(NewEst.getNode()); 15875 15876 NewEst = DAG.getNode(ISD::FSUB, DL, VT, ThreeHalves, NewEst, Flags); 15877 AddToWorklist(NewEst.getNode()); 15878 15879 Est = DAG.getNode(ISD::FMUL, DL, VT, Est, NewEst, Flags); 15880 AddToWorklist(Est.getNode()); 15881 } 15882 15883 // If non-reciprocal square root is requested, multiply the result by Arg. 15884 if (!Reciprocal) { 15885 Est = DAG.getNode(ISD::FMUL, DL, VT, Est, Arg, Flags); 15886 AddToWorklist(Est.getNode()); 15887 } 15888 15889 return Est; 15890 } 15891 15892 /// Newton iteration for a function: F(X) is X_{i+1} = X_i - F(X_i)/F'(X_i) 15893 /// For the reciprocal sqrt, we need to find the zero of the function: 15894 /// F(X) = 1/X^2 - A [which has a zero at X = 1/sqrt(A)] 15895 /// => 15896 /// X_{i+1} = (-0.5 * X_i) * (A * X_i * X_i + (-3.0)) 15897 SDValue DAGCombiner::buildSqrtNRTwoConst(SDValue Arg, SDValue Est, 15898 unsigned Iterations, 15899 SDNodeFlags *Flags, bool Reciprocal) { 15900 EVT VT = Arg.getValueType(); 15901 SDLoc DL(Arg); 15902 SDValue MinusThree = DAG.getConstantFP(-3.0, DL, VT); 15903 SDValue MinusHalf = DAG.getConstantFP(-0.5, DL, VT); 15904 15905 // This routine must enter the loop below to work correctly 15906 // when (Reciprocal == false). 15907 assert(Iterations > 0); 15908 15909 // Newton iterations for reciprocal square root: 15910 // E = (E * -0.5) * ((A * E) * E + -3.0) 15911 for (unsigned i = 0; i < Iterations; ++i) { 15912 SDValue AE = DAG.getNode(ISD::FMUL, DL, VT, Arg, Est, Flags); 15913 AddToWorklist(AE.getNode()); 15914 15915 SDValue AEE = DAG.getNode(ISD::FMUL, DL, VT, AE, Est, Flags); 15916 AddToWorklist(AEE.getNode()); 15917 15918 SDValue RHS = DAG.getNode(ISD::FADD, DL, VT, AEE, MinusThree, Flags); 15919 AddToWorklist(RHS.getNode()); 15920 15921 // When calculating a square root at the last iteration build: 15922 // S = ((A * E) * -0.5) * ((A * E) * E + -3.0) 15923 // (notice a common subexpression) 15924 SDValue LHS; 15925 if (Reciprocal || (i + 1) < Iterations) { 15926 // RSQRT: LHS = (E * -0.5) 15927 LHS = DAG.getNode(ISD::FMUL, DL, VT, Est, MinusHalf, Flags); 15928 } else { 15929 // SQRT: LHS = (A * E) * -0.5 15930 LHS = DAG.getNode(ISD::FMUL, DL, VT, AE, MinusHalf, Flags); 15931 } 15932 AddToWorklist(LHS.getNode()); 15933 15934 Est = DAG.getNode(ISD::FMUL, DL, VT, LHS, RHS, Flags); 15935 AddToWorklist(Est.getNode()); 15936 } 15937 15938 return Est; 15939 } 15940 15941 /// Build code to calculate either rsqrt(Op) or sqrt(Op). 
In the latter case 15942 /// Op*rsqrt(Op) is actually computed, so additional postprocessing is needed if 15943 /// Op can be zero. 15944 SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags *Flags, 15945 bool Reciprocal) { 15946 if (Level >= AfterLegalizeDAG) 15947 return SDValue(); 15948 15949 // TODO: Handle half and/or extended types? 15950 EVT VT = Op.getValueType(); 15951 if (VT.getScalarType() != MVT::f32 && VT.getScalarType() != MVT::f64) 15952 return SDValue(); 15953 15954 // If estimates are explicitly disabled for this function, we're done. 15955 MachineFunction &MF = DAG.getMachineFunction(); 15956 int Enabled = TLI.getRecipEstimateSqrtEnabled(VT, MF); 15957 if (Enabled == TLI.ReciprocalEstimate::Disabled) 15958 return SDValue(); 15959 15960 // Estimates may be explicitly enabled for this type with a custom number of 15961 // refinement steps. 15962 int Iterations = TLI.getSqrtRefinementSteps(VT, MF); 15963 15964 bool UseOneConstNR = false; 15965 if (SDValue Est = 15966 TLI.getSqrtEstimate(Op, DAG, Enabled, Iterations, UseOneConstNR, 15967 Reciprocal)) { 15968 AddToWorklist(Est.getNode()); 15969 15970 if (Iterations) { 15971 Est = UseOneConstNR 15972 ? buildSqrtNROneConst(Op, Est, Iterations, Flags, Reciprocal) 15973 : buildSqrtNRTwoConst(Op, Est, Iterations, Flags, Reciprocal); 15974 15975 if (!Reciprocal) { 15976 // Unfortunately, Est is now NaN if the input was exactly 0.0. 15977 // Select out this case and force the answer to 0.0. 15978 EVT VT = Op.getValueType(); 15979 SDLoc DL(Op); 15980 15981 SDValue FPZero = DAG.getConstantFP(0.0, DL, VT); 15982 EVT CCVT = getSetCCResultType(VT); 15983 SDValue ZeroCmp = DAG.getSetCC(DL, CCVT, Op, FPZero, ISD::SETEQ); 15984 AddToWorklist(ZeroCmp.getNode()); 15985 15986 Est = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, DL, VT, 15987 ZeroCmp, FPZero, Est); 15988 AddToWorklist(Est.getNode()); 15989 } 15990 } 15991 return Est; 15992 } 15993 15994 return SDValue(); 15995 } 15996 15997 SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op, SDNodeFlags *Flags) { 15998 return buildSqrtEstimateImpl(Op, Flags, true); 15999 } 16000 16001 SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags *Flags) { 16002 return buildSqrtEstimateImpl(Op, Flags, false); 16003 } 16004 16005 /// Return true if base is a frame index, which is known not to alias with 16006 /// anything but itself. Provides base object and offset as results. 16007 static bool FindBaseOffset(SDValue Ptr, SDValue &Base, int64_t &Offset, 16008 const GlobalValue *&GV, const void *&CV) { 16009 // Assume it is a primitive operation. 16010 Base = Ptr; Offset = 0; GV = nullptr; CV = nullptr; 16011 16012 // If it's an adding a simple constant then integrate the offset. 16013 if (Base.getOpcode() == ISD::ADD) { 16014 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Base.getOperand(1))) { 16015 Base = Base.getOperand(0); 16016 Offset += C->getSExtValue(); 16017 } 16018 } 16019 16020 // Return the underlying GlobalValue, and update the Offset. Return false 16021 // for GlobalAddressSDNode since the same GlobalAddress may be represented 16022 // by multiple nodes with different offsets. 16023 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Base)) { 16024 GV = G->getGlobal(); 16025 Offset += G->getOffset(); 16026 return false; 16027 } 16028 16029 // Return the underlying Constant value, and update the Offset. Return false 16030 // for ConstantSDNodes since the same constant pool entry may be represented 16031 // by multiple nodes with different offsets. 
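// (The caller compares the extracted CV pointers and accumulated Offsets
// instead, which handles distinct nodes that refer to the same constant
// pool entry.)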
16032 if (ConstantPoolSDNode *C = dyn_cast<ConstantPoolSDNode>(Base)) { 16033 CV = C->isMachineConstantPoolEntry() ? (const void *)C->getMachineCPVal() 16034 : (const void *)C->getConstVal(); 16035 Offset += C->getOffset(); 16036 return false; 16037 } 16038 // If it's any of the following then it can't alias with anything but itself. 16039 return isa<FrameIndexSDNode>(Base); 16040 } 16041 16042 /// Return true if there is any possibility that the two addresses overlap. 16043 bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const { 16044 // If they are the same then they must be aliases. 16045 if (Op0->getBasePtr() == Op1->getBasePtr()) return true; 16046 16047 // If they are both volatile then they cannot be reordered. 16048 if (Op0->isVolatile() && Op1->isVolatile()) return true; 16049 16050 // If one operation reads from invariant memory, and the other may store, they 16051 // cannot alias. These should really be checking the equivalent of mayWrite, 16052 // but it only matters for memory nodes other than load /store. 16053 if (Op0->isInvariant() && Op1->writeMem()) 16054 return false; 16055 16056 if (Op1->isInvariant() && Op0->writeMem()) 16057 return false; 16058 16059 // Gather base node and offset information. 16060 SDValue Base1, Base2; 16061 int64_t Offset1, Offset2; 16062 const GlobalValue *GV1, *GV2; 16063 const void *CV1, *CV2; 16064 bool isFrameIndex1 = FindBaseOffset(Op0->getBasePtr(), 16065 Base1, Offset1, GV1, CV1); 16066 bool isFrameIndex2 = FindBaseOffset(Op1->getBasePtr(), 16067 Base2, Offset2, GV2, CV2); 16068 16069 // If they have a same base address then check to see if they overlap. 16070 if (Base1 == Base2 || (GV1 && (GV1 == GV2)) || (CV1 && (CV1 == CV2))) 16071 return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 || 16072 (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1); 16073 16074 // It is possible for different frame indices to alias each other, mostly 16075 // when tail call optimization reuses return address slots for arguments. 16076 // To catch this case, look up the actual index of frame indices to compute 16077 // the real alias relationship. 16078 if (isFrameIndex1 && isFrameIndex2) { 16079 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); 16080 Offset1 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base1)->getIndex()); 16081 Offset2 += MFI.getObjectOffset(cast<FrameIndexSDNode>(Base2)->getIndex()); 16082 return !((Offset1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= Offset2 || 16083 (Offset2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= Offset1); 16084 } 16085 16086 // Otherwise, if we know what the bases are, and they aren't identical, then 16087 // we know they cannot alias. 16088 if ((isFrameIndex1 || CV1 || GV1) && (isFrameIndex2 || CV2 || GV2)) 16089 return false; 16090 16091 // If we know required SrcValue1 and SrcValue2 have relatively large alignment 16092 // compared to the size and offset of the access, we may be able to prove they 16093 // do not alias. This check is conservative for now to catch cases created by 16094 // splitting vector types. 
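// Illustrative case: two 8-byte accesses, both 16-byte aligned, with source
// offsets 0 and 8. Their offsets modulo the alignment are 0 and 8, and
// 0 + 8 <= 8, so each access stays within its own half of a 16-byte block
// and the two cannot overlap.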
16095 if ((Op0->getOriginalAlignment() == Op1->getOriginalAlignment()) && 16096 (Op0->getSrcValueOffset() != Op1->getSrcValueOffset()) && 16097 (Op0->getMemoryVT().getSizeInBits() >> 3 == 16098 Op1->getMemoryVT().getSizeInBits() >> 3) && 16099 (Op0->getOriginalAlignment() > (Op0->getMemoryVT().getSizeInBits() >> 3))) { 16100 int64_t OffAlign1 = Op0->getSrcValueOffset() % Op0->getOriginalAlignment(); 16101 int64_t OffAlign2 = Op1->getSrcValueOffset() % Op1->getOriginalAlignment(); 16102 16103 // There is no overlap between these relatively aligned accesses of similar 16104 // size, return no alias. 16105 if ((OffAlign1 + (Op0->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign2 || 16106 (OffAlign2 + (Op1->getMemoryVT().getSizeInBits() >> 3)) <= OffAlign1) 16107 return false; 16108 } 16109 16110 bool UseAA = CombinerGlobalAA.getNumOccurrences() > 0 16111 ? CombinerGlobalAA 16112 : DAG.getSubtarget().useAA(); 16113 #ifndef NDEBUG 16114 if (CombinerAAOnlyFunc.getNumOccurrences() && 16115 CombinerAAOnlyFunc != DAG.getMachineFunction().getName()) 16116 UseAA = false; 16117 #endif 16118 if (UseAA && 16119 Op0->getMemOperand()->getValue() && Op1->getMemOperand()->getValue()) { 16120 // Use alias analysis information. 16121 int64_t MinOffset = std::min(Op0->getSrcValueOffset(), 16122 Op1->getSrcValueOffset()); 16123 int64_t Overlap1 = (Op0->getMemoryVT().getSizeInBits() >> 3) + 16124 Op0->getSrcValueOffset() - MinOffset; 16125 int64_t Overlap2 = (Op1->getMemoryVT().getSizeInBits() >> 3) + 16126 Op1->getSrcValueOffset() - MinOffset; 16127 AliasResult AAResult = 16128 AA.alias(MemoryLocation(Op0->getMemOperand()->getValue(), Overlap1, 16129 UseTBAA ? Op0->getAAInfo() : AAMDNodes()), 16130 MemoryLocation(Op1->getMemOperand()->getValue(), Overlap2, 16131 UseTBAA ? Op1->getAAInfo() : AAMDNodes())); 16132 if (AAResult == NoAlias) 16133 return false; 16134 } 16135 16136 // Otherwise we have to assume they alias. 16137 return true; 16138 } 16139 16140 /// Walk up chain skipping non-aliasing memory nodes, 16141 /// looking for aliasing nodes and adding them to the Aliases vector. 16142 void DAGCombiner::GatherAllAliases(SDNode *N, SDValue OriginalChain, 16143 SmallVectorImpl<SDValue> &Aliases) { 16144 SmallVector<SDValue, 8> Chains; // List of chains to visit. 16145 SmallPtrSet<SDNode *, 16> Visited; // Visited node set. 16146 16147 // Get alias information for node. 16148 bool IsLoad = isa<LoadSDNode>(N) && !cast<LSBaseSDNode>(N)->isVolatile(); 16149 16150 // Starting off. 16151 Chains.push_back(OriginalChain); 16152 unsigned Depth = 0; 16153 16154 // Look at each chain and determine if it is an alias. If so, add it to the 16155 // aliases list. If not, then continue up the chain looking for the next 16156 // candidate. 16157 while (!Chains.empty()) { 16158 SDValue Chain = Chains.pop_back_val(); 16159 16160 // For TokenFactor nodes, look at each operand and only continue up the 16161 // chain until we reach the depth limit. 16162 // 16163 // FIXME: The depth check could be made to return the last non-aliasing 16164 // chain we found before we hit a tokenfactor rather than the original 16165 // chain. 16166 if (Depth > TLI.getGatherAllAliasesMaxDepth()) { 16167 Aliases.clear(); 16168 Aliases.push_back(OriginalChain); 16169 return; 16170 } 16171 16172 // Don't bother if we've been before. 16173 if (!Visited.insert(Chain.getNode()).second) 16174 continue; 16175 16176 switch (Chain.getOpcode()) { 16177 case ISD::EntryToken: 16178 // Entry token is ideal chain operand, but handled in FindBetterChain. 
16179 break; 16180 16181 case ISD::LOAD: 16182 case ISD::STORE: { 16183 // Get alias information for Chain. 16184 bool IsOpLoad = isa<LoadSDNode>(Chain.getNode()) && 16185 !cast<LSBaseSDNode>(Chain.getNode())->isVolatile(); 16186 16187 // If chain is alias then stop here. 16188 if (!(IsLoad && IsOpLoad) && 16189 isAlias(cast<LSBaseSDNode>(N), cast<LSBaseSDNode>(Chain.getNode()))) { 16190 Aliases.push_back(Chain); 16191 } else { 16192 // Look further up the chain. 16193 Chains.push_back(Chain.getOperand(0)); 16194 ++Depth; 16195 } 16196 break; 16197 } 16198 16199 case ISD::TokenFactor: 16200 // We have to check each of the operands of the token factor for "small" 16201 // token factors, so we queue them up. Adding the operands to the queue 16202 // (stack) in reverse order maintains the original order and increases the 16203 // likelihood that getNode will find a matching token factor (CSE.) 16204 if (Chain.getNumOperands() > 16) { 16205 Aliases.push_back(Chain); 16206 break; 16207 } 16208 for (unsigned n = Chain.getNumOperands(); n;) 16209 Chains.push_back(Chain.getOperand(--n)); 16210 ++Depth; 16211 break; 16212 16213 case ISD::CopyFromReg: 16214 // Forward past CopyFromReg. 16215 Chains.push_back(Chain.getOperand(0)); 16216 ++Depth; 16217 break; 16218 16219 default: 16220 // For all other instructions we will just have to take what we can get. 16221 Aliases.push_back(Chain); 16222 break; 16223 } 16224 } 16225 } 16226 16227 /// Walk up chain skipping non-aliasing memory nodes, looking for a better chain 16228 /// (aliasing node.) 16229 SDValue DAGCombiner::FindBetterChain(SDNode *N, SDValue OldChain) { 16230 SmallVector<SDValue, 8> Aliases; // Ops for replacing token factor. 16231 16232 // Accumulate all the aliases to this node. 16233 GatherAllAliases(N, OldChain, Aliases); 16234 16235 // If no operands then chain to entry token. 16236 if (Aliases.size() == 0) 16237 return DAG.getEntryNode(); 16238 16239 // If a single operand then chain to it. We don't need to revisit it. 16240 if (Aliases.size() == 1) 16241 return Aliases[0]; 16242 16243 // Construct a custom tailored token factor. 16244 return DAG.getNode(ISD::TokenFactor, SDLoc(N), MVT::Other, Aliases); 16245 } 16246 16247 // This function tries to collect a bunch of potentially interesting 16248 // nodes to improve the chains of, all at once. This might seem 16249 // redundant, as this function gets called when visiting every store 16250 // node, so why not let the work be done on each store as it's visited? 16251 // 16252 // I believe this is mainly important because MergeConsecutiveStores 16253 // is unable to deal with merging stores of different sizes, so unless 16254 // we improve the chains of all the potential candidates up-front 16255 // before running MergeConsecutiveStores, it might only see some of 16256 // the nodes that will eventually be candidates, and then not be able 16257 // to go from a partially-merged state to the desired final 16258 // fully-merged state. 16259 bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) { 16260 // This holds the base pointer, index, and the offset in bytes from the base 16261 // pointer. 16262 BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG); 16263 16264 // We must have a base and an offset. 16265 if (!BasePtr.Base.getNode()) 16266 return false; 16267 16268 // Do not handle stores to undef base pointers. 
16269 if (BasePtr.Base.isUndef()) 16270 return false; 16271 16272 SmallVector<StoreSDNode *, 8> ChainedStores; 16273 ChainedStores.push_back(St); 16274 16275 // Walk up the chain and look for nodes with offsets from the same 16276 // base pointer. Stop when reaching an instruction with a different kind 16277 // or instruction which has a different base pointer. 16278 StoreSDNode *Index = St; 16279 while (Index) { 16280 // If the chain has more than one use, then we can't reorder the mem ops. 16281 if (Index != St && !SDValue(Index, 0)->hasOneUse()) 16282 break; 16283 16284 if (Index->isVolatile() || Index->isIndexed()) 16285 break; 16286 16287 // Find the base pointer and offset for this memory node. 16288 BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr(), DAG); 16289 16290 // Check that the base pointer is the same as the original one. 16291 if (!Ptr.equalBaseIndex(BasePtr)) 16292 break; 16293 16294 // Walk up the chain to find the next store node, ignoring any 16295 // intermediate loads. Any other kind of node will halt the loop. 16296 SDNode *NextInChain = Index->getChain().getNode(); 16297 while (true) { 16298 if (StoreSDNode *STn = dyn_cast<StoreSDNode>(NextInChain)) { 16299 // We found a store node. Use it for the next iteration. 16300 if (STn->isVolatile() || STn->isIndexed()) { 16301 Index = nullptr; 16302 break; 16303 } 16304 ChainedStores.push_back(STn); 16305 Index = STn; 16306 break; 16307 } else if (LoadSDNode *Ldn = dyn_cast<LoadSDNode>(NextInChain)) { 16308 NextInChain = Ldn->getChain().getNode(); 16309 continue; 16310 } else { 16311 Index = nullptr; 16312 break; 16313 } 16314 } // end while 16315 } 16316 16317 // At this point, ChainedStores lists all of the Store nodes 16318 // reachable by iterating up through chain nodes matching the above 16319 // conditions. For each such store identified, try to find an 16320 // earlier chain to attach the store to which won't violate the 16321 // required ordering. 16322 bool MadeChangeToSt = false; 16323 SmallVector<std::pair<StoreSDNode *, SDValue>, 8> BetterChains; 16324 16325 for (StoreSDNode *ChainedStore : ChainedStores) { 16326 SDValue Chain = ChainedStore->getChain(); 16327 SDValue BetterChain = FindBetterChain(ChainedStore, Chain); 16328 16329 if (Chain != BetterChain) { 16330 if (ChainedStore == St) 16331 MadeChangeToSt = true; 16332 BetterChains.push_back(std::make_pair(ChainedStore, BetterChain)); 16333 } 16334 } 16335 16336 // Do all replacements after finding the replacements to make to avoid making 16337 // the chains more complicated by introducing new TokenFactors. 16338 for (auto Replacement : BetterChains) 16339 replaceStoreChain(Replacement.first, Replacement.second); 16340 16341 return MadeChangeToSt; 16342 } 16343 16344 /// This is the entry point for the file. 16345 void SelectionDAG::Combine(CombineLevel Level, AliasAnalysis &AA, 16346 CodeGenOpt::Level OptLevel) { 16347 /// This is the main entry point to this class. 16348 DAGCombiner(*this, AA, OptLevel).Run(Level); 16349 } 16350